From 85a3340d0edde3c7cab6bd88a874ceacce0a5018 Mon Sep 17 00:00:00 2001 From: Aoi Kurokawa Date: Mon, 31 Jul 2023 01:53:03 +0000 Subject: [PATCH 01/20] Implement liveness BeaconAPI (#4343) ## Issue Addressed #4243 ## Proposed Changes - create a new endpoint for liveness/{endpoint} ## Additional Info This is my first PR. --- beacon_node/http_api/src/lib.rs | 40 ++++++++++++++++ beacon_node/http_api/tests/tests.rs | 71 +++++++++++++++++++++++++++++ common/eth2/src/lib.rs | 18 ++++++++ common/eth2/src/types.rs | 7 +++ 4 files changed, 136 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1f93f461108..8c87c315ebe 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3301,6 +3301,45 @@ pub fn serve( }, ); + // POST vaidator/liveness/{epoch} + let post_validator_liveness_epoch = eth_v1 + .and(warp::path("validator")) + .and(warp::path("liveness")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, indices: Vec, chain: Arc>| { + blocking_json_task(move || { + // Ensure the request is for either the current, previous or next epoch. + let current_epoch = chain + .epoch() + .map_err(warp_utils::reject::beacon_chain_error)?; + let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); + let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + + if epoch < prev_epoch || epoch > next_epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch from the current epoch {}", + epoch, current_epoch + ))); + } + + let liveness: Vec = indices + .iter() + .cloned() + .map(|index| { + let is_live = chain.validator_seen_at_epoch(index as usize, epoch); + api_types::StandardLivenessResponseData { index, is_live } + }) + .collect(); + + Ok(api_types::GenericResponse::from(liveness)) + }) + }, + ); + // POST lighthouse/liveness let post_lighthouse_liveness = warp::path("lighthouse") .and(warp::path("liveness")) @@ -3963,6 +4002,7 @@ pub fn serve( .uor(post_validator_sync_committee_subscriptions) .uor(post_validator_prepare_beacon_proposer) .uor(post_validator_register_validator) + .uor(post_validator_liveness_epoch) .uor(post_lighthouse_liveness) .uor(post_lighthouse_database_reconstruct) .uor(post_lighthouse_database_historical_blocks) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 741ee1ffc06..dc8ca49d203 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2980,6 +2980,69 @@ impl ApiTester { self } + pub async fn test_post_validator_liveness_epoch(self) -> Self { + let epoch = self.chain.epoch().unwrap(); + let head_state = self.chain.head_beacon_state_cloned(); + let indices = (0..head_state.validators().len()) + .map(|i| i as u64) + .collect::>(); + + // Construct the expected response + let expected: Vec = head_state + .validators() + .iter() + .enumerate() + .map(|(index, _)| StandardLivenessResponseData { + index: index as u64, + is_live: false, + }) + .collect(); + + let result = self + .client + .post_validator_liveness_epoch(epoch, indices.clone()) + .await + .unwrap() + .data; + + assert_eq!(result, expected); + + // Attest to the current slot + self.client + .post_beacon_pool_attestations(self.attestations.as_slice()) + .await + .unwrap(); + + let result = self + .client + .post_validator_liveness_epoch(epoch, indices.clone()) + .await + .unwrap() + .data; + + let committees = head_state 
+ .get_beacon_committees_at_slot(self.chain.slot().unwrap()) + .unwrap(); + let attesting_validators: Vec = committees + .into_iter() + .flat_map(|committee| committee.committee.iter().cloned()) + .collect(); + // All attesters should now be considered live + let expected = expected + .into_iter() + .map(|mut a| { + if attesting_validators.contains(&(a.index as usize)) { + a.is_live = true; + } + a + }) + .collect::>(); + + assert_eq!(result, expected); + + self + } + // Helper function for tests that require a valid RANDAO signature. async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { let fork = self.chain.canonical_head.cached_head().head_fork(); @@ -4870,6 +4933,14 @@ async fn builder_works_post_capella() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_liveness_epoch() { + ApiTester::new() + .await + .test_post_validator_liveness_epoch() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index e34916bebab..5fcddbc46d6 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1636,6 +1636,24 @@ impl BeaconNodeHttpClient { .await } + /// `POST validator/liveness/{epoch}` + pub async fn post_validator_liveness_epoch( + &self, + epoch: Epoch, + indices: Vec, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("liveness") + .push(&epoch.to_string()); + + self.post_with_timeout_and_response(path, &indices, self.timeouts.liveness) + .await + } + /// `POST validator/duties/attester/{epoch}` pub async fn post_validator_duties_attester( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 5f2e1ada7be..f451d3b8f2e 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1225,6 +1225,13 @@ impl FromStr for Accept { } } +#[derive(PartialEq, Debug, Serialize, Deserialize)] +pub struct StandardLivenessResponseData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub is_live: bool, +} + #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, From eafe08780c50bb02bd8a2e9829b894102375bad4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 31 Jul 2023 01:53:04 +0000 Subject: [PATCH 02/20] Restore upstream arbitrary (#4372) ## Proposed Changes Remove patch for `arbitrary` in favour of upstream, now that the `arithmetic_side_effects` lint no longer triggers in derive macro code. 
## Additional Info ~~Blocked on Rust 1.71.0, to be released 13 July 23~~ --- Cargo.lock | 10 ++++++---- Cargo.toml | 1 - 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fda8cd761f9..ec8ec6f5fce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -263,7 +263,8 @@ checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "arbitrary" version = "1.3.0" -source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" dependencies = [ "derive_arbitrary", ] @@ -1768,12 +1769,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.0" -source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 8b820d2a215..cb09d26a5dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,7 +90,6 @@ resolver = "2" [patch] [patch.crates-io] warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } -arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } [profile.maxperf] inherits = "release" From b96cfcaaa4ca02d4531537a143df6b2de3b29887 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 31 Jul 2023 01:53:05 +0000 Subject: [PATCH 03/20] Fix bug in lcli transition-blocks and improve pretty-ssz (#4513) ## Proposed Changes - Fix bad `state_root` reuse in `lcli transition-blocks` that resulted in invalid results at skipped slots. - Modernise `lcli pretty-ssz` to include fork-generic decoders for `SignedBeaconBlock` and `BeaconState` which respect the `--network`/`--testnet-dir` flag. ## Additional Info Breaking change: the underscore names like `signed_block_merge` are removed in favour of the fork-generic name `SignedBeaconBlock`, and fork-specific names which match the superstruct variants, e.g. `SignedBeaconBlockMerge`. 
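For illustration, invocations under the new naming scheme might look like the following (a sketch: the file paths are placeholders, and `--network`/`--testnet-dir` are the flags referenced above):

```bash
# Fork-generic: the decoder picks the fork from the network's fork schedule.
lcli pretty-ssz --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz

# Fork-specific: type names match the superstruct variants.
lcli pretty-ssz --network mainnet SignedBeaconBlockCapella block.ssz
```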
--- lcli/src/main.rs | 4 +- lcli/src/parse_ssz.rs | 71 +++++++++++++++----- lcli/src/transition_blocks.rs | 11 ++- scripts/local_testnet/start_local_testnet.sh | 2 +- 4 files changed, 64 insertions(+), 24 deletions(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 8fbf5638b4c..38fec2ebb48 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -968,7 +968,9 @@ fn run( .map_err(|e| format!("Failed to skip slots: {}", e)) } ("pretty-ssz", Some(matches)) => { - run_parse_ssz::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) + let network_config = get_network_config()?; + run_parse_ssz::(network_config, matches) + .map_err(|e| format!("Failed to pretty print hex: {}", e)) } ("deploy-deposit-contract", Some(matches)) => { deploy_deposit_contract::run::(env, matches) diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 5d988ee1815..5c306f4fdc1 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,5 +1,6 @@ use clap::ArgMatches; use clap_utils::parse_required; +use eth2_network_config::Eth2NetworkConfig; use serde::Serialize; use snap::raw::Decoder; use ssz::Decode; @@ -26,7 +27,10 @@ impl FromStr for OutputFormat { } } -pub fn run_parse_ssz(matches: &ArgMatches) -> Result<(), String> { +pub fn run_parse_ssz( + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { let type_str = matches.value_of("type").ok_or("No type supplied")?; let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; let format = parse_required(matches, "format")?; @@ -44,44 +48,79 @@ pub fn run_parse_ssz(matches: &ArgMatches) -> Result<(), String> { bytes }; - info!("Using {} spec", T::spec_name()); - info!("Type: {:?}", type_str); + let spec = &network_config.chain_spec::()?; + info!( + "Using {} network config ({} preset)", + spec.config_name.as_deref().unwrap_or("unknown"), + T::spec_name() + ); + info!("Type: {type_str}"); + // More fork-specific decoders may need to be added in future, but shouldn't be 100% necessary, + // as the fork-generic decoder will always be available (requires correct --network flag). match type_str { - "signed_block_base" => decode_and_print::>(&bytes, format)?, - "signed_block_altair" => decode_and_print::>(&bytes, format)?, - "signed_block_merge" => decode_and_print::>(&bytes, format)?, - "block_base" => decode_and_print::>(&bytes, format)?, - "block_altair" => decode_and_print::>(&bytes, format)?, - "block_merge" => decode_and_print::>(&bytes, format)?, - "state_base" => decode_and_print::>(&bytes, format)?, - "state_altair" => decode_and_print::>(&bytes, format)?, - "state_merge" => decode_and_print::>(&bytes, format)?, + "SignedBeaconBlock" => decode_and_print::>( + &bytes, + |bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec), + format, + )?, + "SignedBeaconBlockBase" | "SignedBeaconBlockPhase0" => { + decode_and_print(&bytes, SignedBeaconBlockBase::::from_ssz_bytes, format)? + } + "SignedBeaconBlockAltair" => { + decode_and_print(&bytes, SignedBeaconBlockAltair::::from_ssz_bytes, format)? + } + "SignedBeaconBlockMerge" | "SignedBeaconBlockBellatrix" => { + decode_and_print(&bytes, SignedBeaconBlockMerge::::from_ssz_bytes, format)? + } + "SignedBeaconBlockCapella" => decode_and_print( + &bytes, + SignedBeaconBlockCapella::::from_ssz_bytes, + format, + )?, + "BeaconState" => decode_and_print::>( + &bytes, + |bytes| BeaconState::from_ssz_bytes(bytes, spec), + format, + )?, + "BeaconStateBase" | "BeaconStatePhase0" => { + decode_and_print(&bytes, BeaconStateBase::::from_ssz_bytes, format)? 
+ } + "BeaconStateAltair" => { + decode_and_print(&bytes, BeaconStateAltair::::from_ssz_bytes, format)? + } + "BeaconStateMerge" | "BeaconStateBellatrix" => { + decode_and_print(&bytes, BeaconStateMerge::::from_ssz_bytes, format)? + } + "BeaconStateCapella" => { + decode_and_print(&bytes, BeaconStateCapella::::from_ssz_bytes, format)? + } other => return Err(format!("Unknown type: {}", other)), }; Ok(()) } -fn decode_and_print( +fn decode_and_print( bytes: &[u8], + decoder: impl FnOnce(&[u8]) -> Result, output_format: OutputFormat, ) -> Result<(), String> { - let item = T::from_ssz_bytes(bytes).map_err(|e| format!("SSZ decode failed: {:?}", e))?; + let item = decoder(bytes).map_err(|e| format!("SSZ decode failed: {e:?}"))?; match output_format { OutputFormat::Json => { println!( "{}", serde_json::to_string(&item) - .map_err(|e| format!("Unable to write object to JSON: {:?}", e))? + .map_err(|e| format!("Unable to write object to JSON: {e:?}"))? ); } OutputFormat::Yaml => { println!( "{}", serde_yaml::to_string(&item) - .map_err(|e| format!("Unable to write object to YAML: {:?}", e))? + .map_err(|e| format!("Unable to write object to YAML: {e:?}"))? ); } } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 85705177dcb..23b0ae26206 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -73,9 +73,10 @@ use eth2::{ }; use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; +use state_processing::state_advance::complete_state_advance; use state_processing::{ - block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, - BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + block_signature_verifier::BlockSignatureVerifier, per_block_processing, BlockSignatureStrategy, + ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -332,10 +333,8 @@ fn do_transition( // Transition the parent state to the block slot. 
let t = Instant::now(); - for i in pre_state.slot().as_u64()..block.slot().as_u64() { - per_slot_processing(&mut pre_state, Some(state_root), spec) - .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; - } + complete_state_advance(&mut pre_state, Some(state_root), block.slot(), spec) + .map_err(|e| format!("Unable to perform complete advance: {e:?}"))?; debug!("Slot processing: {:?}", t.elapsed()); let t = Instant::now(); diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 64111d56271..4b8357b993e 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -103,7 +103,7 @@ echo "executing: ./setup.sh >> $LOG_DIR/setup.log" ./setup.sh >> $LOG_DIR/setup.log 2>&1 # Update future hardforks time in the EL genesis file based on the CL genesis time -GENESIS_TIME=$(lcli pretty-ssz state_merge $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') +GENESIS_TIME=$(lcli pretty-ssz --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') echo $GENESIS_TIME CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) echo $CAPELLA_TIME From b5337c0ea57adb30bd933ea2a7201fce12372f6f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 31 Jul 2023 01:53:06 +0000 Subject: [PATCH 04/20] Fix incorrect ideal rewards calculation (#4520) ## Issue Addressed The PR fixes a bug where the ideal rewards for source and head were incorrectly set. Output from testing a validator that performed optimally in a Phase 0 epoch; note that the `source` and `head` values under ideal rewards are incorrect (compared to the actual `total_rewards` below): ```json { "ideal_rewards": [ ... { "effective_balance": "32000000000", "head": "18771", "target": "18770", "source": "18729", "inclusion_delay": "17083", "inactivity": "0" } ], "total_rewards": [ { "validator_index": "0", "head": "18729", "target": "18770", "source": "18771", "inclusion_delay": "17083", "inactivity": "0" } ] } ``` --- beacon_node/beacon_chain/src/attestation_rewards.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 9fc21b668fa..94bd28f98fd 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -330,7 +330,7 @@ impl BeaconChain { // compute ideal head rewards let head = get_attestation_component_delta( true, - total_balances.previous_epoch_attesters(), + total_balances.previous_epoch_head_attesters(), total_balances, base_reward, finality_delay, @@ -352,7 +352,7 @@ impl BeaconChain { // compute ideal source rewards let source = get_attestation_component_delta( true, - total_balances.previous_epoch_head_attesters(), + total_balances.previous_epoch_attesters(), total_balances, base_reward, finality_delay, From 117802cef130bedac96a0b5441bb46e6b03ad491 Mon Sep 17 00:00:00 2001 From: Gua00va Date: Mon, 31 Jul 2023 01:53:07 +0000 Subject: [PATCH 05/20] Add Eth Version Header (#4528) ## Issue Addressed Closes #4525 ## Proposed Changes The `GET /eth/v1/validator/blinded_blocks` and `GET /eth/v1/validator/blocks` endpoints now send the `Eth-Consensus-Version` response header. 
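For example, the header can be observed with a request along these lines (a sketch: the port, slot, and `randao_reveal` value are illustrative placeholders):

```bash
# Dump response headers only and look for the consensus version.
curl -sD - -o /dev/null \
  "http://localhost:5052/eth/v1/validator/blinded_blocks/12345?randao_reveal=0x..." \
  | grep -i "eth-consensus-version"
```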
Co-authored-by: Gua00va <105484243+Gua00va@users.noreply.github.com> --- beacon_node/http_api/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8c87c315ebe..b45c4285aac 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2697,6 +2697,7 @@ pub fn serve( fork_versioned_response(endpoint_version, fork_name, block) .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) }, ); @@ -2754,6 +2755,7 @@ pub fn serve( // Pose as a V2 endpoint so we return the fork `version`. fork_versioned_response(V2, fork_name, block) .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) }, ); From 8654f20028704512d472f96c42512cc3d206d319 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 31 Jul 2023 01:53:08 +0000 Subject: [PATCH 06/20] Development feature flag - Disable backfill (#4537) Often when testing I have to create a hack which is annoying to maintain. I think it might be handy to add a custom compile-time flag that developers can use if they want to test things locally without having to backfill a bunch of blocks. There is probably an argument to have a feature called "backfill" which is enabled by default and can be disabled. I didn't go this route because I think it's counter-intuitive to have a feature that enables a core and necessary behaviour. --- beacon_node/network/Cargo.toml | 6 +++++- beacon_node/network/src/service.rs | 6 ++++++ beacon_node/network/src/sync/manager.rs | 3 +++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index a5cc12bbc55..c37b0fa45d7 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -48,4 +48,8 @@ operation_pool = { path = "../operation_pool" } execution_layer = { path = "../execution_layer" } beacon_processor = { path = "../beacon_processor" } parking_lot = "0.12.0" -environment = { path = "../../lighthouse/environment" } \ No newline at end of file +environment = { path = "../../lighthouse/environment" } + +[features] +# NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill +disable-backfill = [] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index c2719477f1f..b517d57df3b 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -232,6 +232,12 @@ impl NetworkService { // build the channels for external comms let (network_senders, network_recievers) = NetworkSenders::new(); + #[cfg(feature = "disable-backfill")] + warn!( + network_log, + "Backfill is disabled. DO NOT RUN IN PRODUCTION" + ); + // try and construct UPnP port mappings if required. if let Some(upnp_config) = crate::nat::UPnPConfig::from_config(config) { let upnp_log = network_log.new(o!("service" => "UPnP")); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 72542752c51..670e88eac5e 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -395,6 +395,7 @@ impl SyncManager { // If we would otherwise be synced, first check if we need to perform or // complete a backfill sync. + #[cfg(not(feature = "disable_backfill"))] if matches!(sync_state, SyncState::Synced) { // Determine if we need to start/resume/restart a backfill sync. 
match self.backfill_sync.start(&mut self.network) { @@ -419,6 +420,7 @@ impl SyncManager { } Some((RangeSyncType::Finalized, start_slot, target_slot)) => { // If there is a backfill sync in progress pause it. + #[cfg(not(feature = "disable_backfill"))] self.backfill_sync.pause(); SyncState::SyncingFinalized { @@ -428,6 +430,7 @@ impl SyncManager { } Some((RangeSyncType::Head, start_slot, target_slot)) => { // If there is a backfill sync in progress pause it. + #[cfg(not(feature = "disable_backfill"))] self.backfill_sync.pause(); SyncState::SyncingHead { From e8c411c288e6d4d4726f123ac19a5942f7f1cf8c Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 31 Jul 2023 23:51:37 +0000 Subject: [PATCH 07/20] add ssz support in request body for /beacon/blocks endpoints (v1 & v2) (#4479) ## Issue Addressed [#4457](https://github.com/sigp/lighthouse/issues/4457) ## Proposed Changes add ssz support in request body for /beacon/blocks endpoints (v1 & v2) ## Additional Info --- Cargo.lock | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 92 ++++++++++++++++++- .../tests/broadcast_validation_tests.rs | 42 +++++++++ beacon_node/http_api/tests/tests.rs | 55 +++++++++++ common/eth2/src/lib.rs | 87 +++++++++++++++++- 6 files changed, 276 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec8ec6f5fce..a7e83112310 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3524,6 +3524,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58", + "bytes", "directory", "environment", "eth1", diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 2b117b26cef..4b4a28b51ec 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -42,6 +42,7 @@ operation_pool = { path = "../operation_pool" } sensitive_url = { path = "../../common/sensitive_url" } unused_port = {path = "../../common/unused_port"} store = { path = "../store" } +bytes = "1.1.0" [dev-dependencies] environment = { path = "../../lighthouse/environment" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b45c4285aac..7d1475809a7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -29,6 +29,7 @@ use beacon_chain::{ BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; +use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, @@ -1236,6 +1237,41 @@ pub fn serve( }, ); + let post_beacon_blocks_ssz = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block_bytes: Bytes, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + let block = match SignedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(e) => { + return Err(warp_utils::reject::custom_bad_request(format!("{:?}", e))) + } + }; + publish_blocks::publish_block( + None, + ProvenancedBlock::local(Arc::new(block)), + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }, + ); + let post_beacon_blocks_v2 = eth_v2 .and(warp::path("beacon")) .and(warp::path("blocks")) @@ -1274,6 +1310,57 @@ pub fn serve( }, ); + let post_beacon_blocks_v2_ssz = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blocks")) 
+ .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + let block = match SignedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(_) => { + return warp::reply::with_status( + StatusCode::BAD_REQUEST, + eth2::StatusCode::BAD_REQUEST, + ) + .into_response(); + } + }; + match publish_blocks::publish_block( + None, + ProvenancedBlock::local(Arc::new(block)), + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }, + ); + /* * beacon/blocks */ @@ -3984,7 +4071,10 @@ pub fn serve( .boxed() .uor( warp::post().and( - post_beacon_blocks + warp::header::exact("Content-Type", "application/octet-stream") + // Routes which expect `application/octet-stream` go within this `and`. + .and(post_beacon_blocks_ssz.uor(post_beacon_blocks_v2_ssz)) + .uor(post_beacon_blocks) .uor(post_beacon_blinded_blocks) .uor(post_beacon_blocks_v2) .uor(post_beacon_blinded_blocks_v2) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 4819dd99e7a..457276d7023 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -175,6 +175,48 @@ pub async fn gossip_full_pass() { .block_is_known_to_fork_choice(&block.canonical_root())); } +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_full_pass_ssz() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2_ssz(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn consensus_invalid() { diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index dc8ca49d203..7c3872925a3 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1247,6 +1247,22 @@ impl ApiTester { self } + pub async fn test_post_beacon_blocks_ssz_valid(mut self) -> Self { + let next_block = &self.next_block; + + self.client + .post_beacon_blocks_ssz(next_block) + .await + .unwrap(); + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "valid blocks should be sent to network" + ); + + self + } + pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { let block = self .harness @@ -1270,6 +1286,29 @@ impl ApiTester { self } + pub async fn test_post_beacon_blocks_ssz_invalid(mut self) -> Self { + let block = self + .harness + .make_block_with_modifier( + self.harness.get_current_state(), + self.harness.get_current_slot(), + |b| { + *b.state_root_mut() = Hash256::zero(); + }, + ) + .await + .0; + + assert!(self.client.post_beacon_blocks_ssz(&block).await.is_err()); + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "gossip valid blocks should be sent to network" + ); + + self + } + pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { let expected = block_id @@ -4451,6 +4490,22 @@ async fn post_beacon_blocks_valid() { ApiTester::new().await.test_post_beacon_blocks_valid().await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_beacon_blocks_ssz_valid() { + ApiTester::new() + .await + .test_post_beacon_blocks_ssz_valid() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_post_beacon_blocks_ssz_invalid() { + ApiTester::new() + .await + .test_post_beacon_blocks_ssz_invalid() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_beacon_blocks_invalid() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 5fcddbc46d6..661f9a09eb3 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -21,10 +21,14 @@ use futures_util::StreamExt; use lighthouse_network::PeerId; use pretty_reqwest_error::PrettyReqwestError; pub use reqwest; -use reqwest::{IntoUrl, RequestBuilder, Response}; +use reqwest::{ + header::{HeaderMap, HeaderValue}, + Body, IntoUrl, RequestBuilder, Response, +}; pub use reqwest::{StatusCode, Url}; pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; +use ssz::Encode; use std::convert::TryFrom; use std::fmt; use std::iter::Iterator; @@ -322,6 +326,25 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. + async fn post_generic_with_ssz_body, U: IntoUrl>( + &self, + url: U, + body: T, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder + .header("Content-Type", "application/octet-stream") + .body(body) + .send() + .await?; + ok_or_error(response).await + } + /// Generic POST function supporting arbitrary responses and timeouts. async fn post_generic_with_consensus_version( &self, @@ -342,6 +365,31 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. 
+ async fn post_generic_with_consensus_version_and_ssz_body, U: IntoUrl>( + &self, + url: U, + body: T, + timeout: Option, + fork: ForkName, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let mut headers = HeaderMap::new(); + headers.insert( + CONSENSUS_VERSION_HEADER, + HeaderValue::from_str(&fork.to_string()).expect("Failed to create header value"), + ); + headers.insert( + "Content-Type", + HeaderValue::from_static("application/octet-stream"), + ); + let response = builder.headers(headers).body(body).send().await?; + ok_or_error(response).await + } + /// `GET beacon/genesis` /// /// ## Errors @@ -654,6 +702,26 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blocks_ssz>( + &self, + block: &SignedBeaconBlock, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks"); + + self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal)) + .await?; + + Ok(()) + } + /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. @@ -727,6 +795,23 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST v2/beacon/blocks` + pub async fn post_beacon_blocks_v2_ssz>( + &self, + block: &SignedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version_and_ssz_body( + self.post_beacon_blocks_v2_path(validation_level)?, + block.as_ssz_bytes(), + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + /// `POST v2/beacon/blinded_blocks` pub async fn post_beacon_blinded_blocks_v2( &self, From cb275e746dce455d473bef4d653fe85748477718 Mon Sep 17 00:00:00 2001 From: chonghe Date: Mon, 31 Jul 2023 23:51:38 +0000 Subject: [PATCH 08/20] Update Lighthouse book FAQ (#4510) Some updates in the FAQ based on issues seen on Discord. Additionally, corrected the disk usage on the default SPRP as the previously provided value is not correct. Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> --- book/src/advanced_database.md | 4 +-- book/src/advanced_networking.md | 2 +- book/src/builders.md | 2 ++ book/src/faq.md | 51 +++++++++++++++++++++++++++++++-- 4 files changed, 53 insertions(+), 6 deletions(-) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index d9511040549..20c5d7443b1 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -28,8 +28,8 @@ some example values. | Research | 32 | 3.4 TB | 155 ms | | Block explorer/analysis | 128 | 851 GB | 620 ms | | Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s | -| Hobbyist | 4096 | 26.8 GB | 20.5 s | -| Validator only (default) | 8192 | 8.1 GB | 41 s | +| Hobbyist | 4096 | 26.8 GB | 20.5 s | +| Validator only (default) | 8192 | 12.7 GB | 41 s | *Last update: May 2023. diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 586503cb96c..ba07a6f87f9 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -172,7 +172,7 @@ In order to do so, lighthouse provides the following CLI options/parameters. advertises some address, must be reachable both over UDP and TCP. -In the general case, an user will not require to set these explicitly. Update +In the general case, a user will not require to set these explicitly. 
Update these options only if you can guarantee your node is reachable with these values. diff --git a/book/src/builders.md b/book/src/builders.md index 6db360d70e7..2be4841ddf3 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -201,6 +201,8 @@ else: use local payload ``` +If you would like to always use the builder payload, you can add the flag `--always-prefer-builder-payload` to the beacon node. + ## Checking your builder config You can check that your builder is configured correctly by looking for these log messages. diff --git a/book/src/faq.md b/book/src/faq.md index d3e25438a79..15c5757064f 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -10,6 +10,8 @@ - [My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do?](#bn-missing-beacon) - [After checkpoint sync, the progress of `downloading historical blocks` is slow. Why?](#bn-download-slow) - [My beacon node logs `WARN Error processing HTTP API request`, what should I do?](#bn-http) +- [My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?](#bn-fork-choice) +- [My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?](#bn-queue-full) ## [Validator](#validator-1) - [Why does it take so long for a validator to be activated?](#vc-activation) @@ -30,7 +32,7 @@ - [My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?](#net-bn-vc) - [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip) - [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port) - +- [Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return.](#net-subnet) ## [Miscellaneous](#miscellaneous-1) - [What should I do if I lose my slashing protection database?](#misc-slashing) @@ -74,7 +76,7 @@ The `WARN Execution engine called failed` log is shown when the beacon node cann `error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` -which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flag `--execution-timeout-multiplier 3` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: +which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flags `--execution-timeout-multiplier 3` and `--disable-lock-timeouts` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: 1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. 1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. 1. Your SSD is slow. 
Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD. @@ -170,6 +172,27 @@ ERRO Failed to download attester duties err: FailedToDownloadAttesters("Som This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. +### My beacon node logs `WARN Error signalling fork choice waiter`, what should I do? + +An example of the full log is shown below: + +``` +WARN Error signalling fork choice waiter slot: 6763073, error: ForkChoiceSignalOutOfOrder { current: Slot(6763074), latest: Slot(6763073) }, service: state_advance +``` + +This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced. + + +### My beacon node logs `ERRO Aggregate attestation queue full`, what should I do? + +An example of the full log is shown below: +``` +ERRO Aggregate attestation queue full, queue_len: 4096, msg: the system has insufficient resources for load, module: network::beacon_processor:1542 +``` + +This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced. + + ## Validator ### Why does it take so long for a validator to be activated? @@ -279,12 +302,26 @@ The first thing is to ensure both consensus and execution clients are synced wit - the internet is working well - you have sufficient peers -You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missing attestation should be a rare occurrence. +You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). + +Another cause for missing attestations is delays during block processing. When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`): + +``` +DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon +``` + +The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). 
The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block. + ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. +You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. + +Another possible reason for missing the head vote is due to a chain "reorg". A reorg can happen if the proposer publishes block `n` late, and the proposer of block `n+1` builds upon block `n-1` instead of `n`. This is called a "reorg". Due to the reorg, block `n` was never included in the chain. If you are assigned to attest at slot `n`, it is possible you may still attest to block `n` despite most of the network recognizing the block as being late. In this case you will miss the head reward. + + ### Can I submit a voluntary exit message without running a beacon node? Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md). @@ -425,6 +462,14 @@ No. Lighthouse will auto-detect the change and update your Ethereum Node Record ### How to change the TCP/UDP port 9000 that Lighthouse listens on? Use the flag ```--port ``` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```. +### Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return. + +Previously, having more validators means subscribing to more subnets. Since the change, a node will now only subscribe to 2 subnets in total. This will bring about significant reductions in bandwidth for nodes with multiple validators. + +While subscribing to more subnets can ensure you have peers on a wider range of subnets, these subscriptions consume resources and bandwidth. This does not significantly increase the performance of the node, however it does benefit other nodes on the network. + +If you would still like to subscribe to all subnets, you can use the flag `subscribe-all-subnets`. This may improve the block rewards by 1-5%, though it comes at the cost of a much higher bandwidth requirement. + ## Miscellaneous ### What should I do if I lose my slashing protection database? 
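To make the FAQ's port guidance above concrete, a two-node setup on a single machine might look like this (a sketch: the data directories are illustrative, while `--port` and `--datadir` are existing Lighthouse flags):

```bash
# First beacon node keeps the default port 9000.
lighthouse bn --datadir ~/.lighthouse-node1

# Second beacon node listens on 9001 to avoid clashing with the first.
lighthouse bn --datadir ~/.lighthouse-node2 --port 9001
```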
From 73764d0dd2c0a457b7f438f003b4dbc6b94a65e3 Mon Sep 17 00:00:00 2001 From: Gua00va Date: Mon, 31 Jul 2023 23:51:39 +0000 Subject: [PATCH 09/20] Deprecate `exchangeTransitionConfiguration` functionality (#4517) ## Issue Addressed Solves #4442 ## Proposed Changes EL clients log errors if we don't query this endpoint, but they are making releases that remove this error logging. After those are out we can stop calling it, after which point EL teams will remove the endpoint entirely. Refer https://hackmd.io/@n0ble/deprecate-exchgTC --- .../beacon_chain/src/merge_readiness.rs | 17 ----- beacon_node/client/src/builder.rs | 3 - beacon_node/client/src/notifier.rs | 8 --- beacon_node/execution_layer/src/engine_api.rs | 11 +-- .../execution_layer/src/engine_api/http.rs | 26 ------- beacon_node/execution_layer/src/lib.rs | 67 ------------------- .../src/test_utils/handle_rpc.rs | 9 --- .../execution_layer/src/test_utils/mod.rs | 1 - .../src/test_rig.rs | 10 --- 9 files changed, 3 insertions(+), 149 deletions(-) diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index c66df39eedf..bfc2b36fdb2 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -86,9 +86,6 @@ pub enum MergeReadiness { #[serde(serialize_with = "serialize_uint256")] current_difficulty: Option, }, - /// The transition configuration with the EL failed, there might be a problem with - /// connectivity, authentication or a difference in configuration. - ExchangeTransitionConfigurationFailed { error: String }, /// The EL can be reached and has the correct configuration, however it's not yet synced. NotSynced, /// The user has not configured this node to use an execution endpoint. @@ -109,12 +106,6 @@ impl fmt::Display for MergeReadiness { params, current_difficulty ) } - MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!( - f, - "Could not confirm the transition configuration with the \ - execution endpoint: {:?}", - error - ), MergeReadiness::NotSynced => write!( f, "The execution endpoint is connected and configured, \ @@ -155,14 +146,6 @@ impl BeaconChain { /// Attempts to connect to the EL and confirm that it is ready for the merge. pub async fn check_merge_readiness(&self) -> MergeReadiness { if let Some(el) = self.execution_layer.as_ref() { - if let Err(e) = el.exchange_transition_configuration(&self.spec).await { - // The EL was either unreachable, responded with an error or has a different - // configuration. - return MergeReadiness::ExchangeTransitionConfigurationFailed { - error: format!("{:?}", e), - }; - } - if !el.is_synced_for_notifier().await { // The EL is not synced. return MergeReadiness::NotSynced; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b1a507eaa59..14edbb97309 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -850,9 +850,6 @@ where execution_layer.spawn_clean_proposer_caches_routine::( beacon_chain.slot_clock.clone(), ); - - // Spawns a routine that polls the `exchange_transition_configuration` endpoint. - execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); } // Spawn a service to publish BLS to execution changes at the Capella fork. 
diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1ff469fe300..7d81594ee62 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -404,14 +404,6 @@ async fn merge_readiness_logging( "config" => ?other ), }, - readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => { - error!( - log, - "Not ready for merge"; - "info" => %readiness, - "hint" => "try updating Lighthouse and/or the execution layer", - ) - } readiness @ MergeReadiness::NotSynced => warn!( log, "Not ready for merge"; diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 826294d5ff6..359dcb52239 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,9 +1,8 @@ use crate::engines::ForkchoiceState; use crate::http::{ - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, - ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, + ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, }; use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; pub use ethers_core::types::Transaction; @@ -450,7 +449,6 @@ pub struct EngineCapabilities { pub get_payload_bodies_by_range_v1: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, - pub exchange_transition_configuration_v1: bool, } impl EngineCapabilities { @@ -480,9 +478,6 @@ impl EngineCapabilities { if self.get_payload_v2 { response.push(ENGINE_GET_PAYLOAD_V2); } - if self.exchange_transition_configuration_v1 { - response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); - } response } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 029866d95b5..0ce03e60294 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -46,10 +46,6 @@ pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesB pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); -pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = - "engine_exchangeTransitionConfigurationV1"; -pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); - pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); @@ -68,7 +64,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ]; /// This is necessary because a user might run a capella-enabled version of @@ -83,7 +78,6 @@ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilit get_payload_bodies_by_range_v1: false, get_payload_v1: true, get_payload_v2: false, - exchange_transition_configuration_v1: true, }; /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
@@ -934,24 +928,6 @@ impl HttpJsonRpc { .collect()) } - pub async fn exchange_transition_configuration_v1( - &self, - transition_configuration: TransitionConfigurationV1, - ) -> Result { - let params = json!([transition_configuration]); - - let response = self - .rpc_request( - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, - params, - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT - * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response) - } - pub async fn exchange_capabilities(&self) -> Result { let params = json!([LIGHTHOUSE_CAPABILITIES]); @@ -982,8 +958,6 @@ impl HttpJsonRpc { .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), - exchange_transition_configuration_v1: capabilities - .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), }), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d72686baf55..579bebdacba 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -74,8 +74,6 @@ const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; -const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); - /// A payload alongside some information about where it came from. pub enum ProvenancedPayload
<P>
{ /// A good ol' fashioned farm-to-table payload from your local EE. @@ -502,24 +500,6 @@ impl ExecutionLayer { self.spawn(preparation_cleaner, "exec_preparation_cleanup"); } - /// Spawns a routine that polls the `exchange_transition_configuration` endpoint. - pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) { - let routine = |el: ExecutionLayer| async move { - loop { - if let Err(e) = el.exchange_transition_configuration(&spec).await { - error!( - el.log(), - "Failed to check transition config"; - "error" => ?e - ); - } - sleep(CONFIG_POLL_INTERVAL).await; - } - }; - - self.spawn(routine, "exec_config_poll"); - } - /// Returns `true` if the execution engine is synced and reachable. pub async fn is_synced(&self) -> bool { self.engine().is_synced().await @@ -1318,53 +1298,6 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> { - let local = TransitionConfigurationV1 { - terminal_total_difficulty: spec.terminal_total_difficulty, - terminal_block_hash: spec.terminal_block_hash, - terminal_block_number: 0, - }; - - let result = self - .engine() - .request(|engine| engine.api.exchange_transition_configuration_v1(local)) - .await; - - match result { - Ok(remote) => { - if local.terminal_total_difficulty != remote.terminal_total_difficulty - || local.terminal_block_hash != remote.terminal_block_hash - { - error!( - self.log(), - "Execution client config mismatch"; - "msg" => "ensure lighthouse and the execution client are up-to-date and \ - configured consistently", - "remote" => ?remote, - "local" => ?local, - ); - Err(Error::EngineError(Box::new(EngineError::Api { - error: ApiError::TransitionConfigurationMismatch, - }))) - } else { - debug!( - self.log(), - "Execution client config is OK"; - ); - Ok(()) - } - } - Err(e) => { - error!( - self.log(), - "Unable to get transition config"; - "error" => ?e, - ); - Err(Error::EngineError(Box::new(e))) - } - } - } - /// Returns the execution engine capabilities resulting from a call to /// engine_exchangeCapabilities. 
If the capabilities cache is not populated, /// or if it is populated with a cached result of age >= `age_limit`, this diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 79468b21169..62cab5ad285 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -357,15 +357,6 @@ pub async fn handle_rpc( Ok(serde_json::to_value(response).unwrap()) } - ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => { - let block_generator = ctx.execution_block_generator.read(); - let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 { - terminal_total_difficulty: block_generator.terminal_total_difficulty, - terminal_block_hash: block_generator.terminal_block_hash, - terminal_block_number: block_generator.terminal_block_number, - }; - Ok(serde_json::to_value(transition_config).unwrap()) - } ENGINE_EXCHANGE_CAPABILITIES => { let engine_capabilities = ctx.engine_capabilities.read(); Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index a8e7bab270a..99d264aa7b8 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -43,7 +43,6 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_bodies_by_range_v1: true, get_payload_v1: true, get_payload_v2: true, - exchange_transition_configuration_v1: true, }; mod execution_block_generator; diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 726019a8480..654b8628b8f 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -204,16 +204,6 @@ impl TestRig { let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); let account2 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT2).unwrap()); - /* - * Check the transition config endpoint. - */ - for ee in [&self.ee_a, &self.ee_b] { - ee.execution_layer - .exchange_transition_configuration(&self.spec) - .await - .unwrap(); - } - /* * Read the terminal block hash from both pairs, check it's equal. */ From ff9b09d9646b712b2fd9fe26feeed5758daa0aa6 Mon Sep 17 00:00:00 2001 From: Divma Date: Wed, 2 Aug 2023 00:59:34 +0000 Subject: [PATCH 10/20] upgrade to libp2p 0.52 (#4431) ## Issue Addressed Upgrade libp2p to v0.52 ## Proposed Changes - **Workflows**: remove installation of `protoc` - **Book**: remove installation of `protoc` - **`Dockerfile`s and `cross`**: remove custom base `Dockerfile` for cross since it's no longer needed. 
Remove `protoc` from remaining `Dockerfile`s - **Upgrade `discv5` to `v0.3.1`:** we get some cool stuff there: it no longer needs `protoc`, and ip updates on cold start are faster - **Upgrade `prometheus` to `0.21.0`**, which no longer needs encoding checks - **things that look like refactors:** a bunch of api types were renamed and need to be accessed in a different (clearer) way - **Lighthouse network** (see the sketch after the first diff below) - connection limits are now a behaviour - banned peers no longer exist at the swarm level, only at the behaviour level - `connection_event_buffer_size` is now handled per connection, with a buffer size of 4 - `mplex` is deprecated and was removed - the rpc handler now logs the peer to which it belongs
## Additional Info I tried to keep behaviour as unchanged as possible. However, there are many improvements we can make _after_ this upgrade: - Smart connection limits: connection limits have so far been checked purely by count; we can now use information about the incoming peer to decide whether we want it - More powerful peer management: dial attempts from other behaviours can be rejected early - Incoming connections can be rejected early - Banning can be returned exclusively to peer management: making use of this, we should no longer get connections to banned peers - TCP NAT updates: we might be able to take advantage of confirmed external addresses to verify tcp ports/ips
Co-authored-by: Age Manning Co-authored-by: Akihito Nakano --- .github/workflows/local-testnet.yml | 4 - .github/workflows/release.yml | 9 - .github/workflows/test-suite.yml | 68 +- Cargo.lock | 2694 ++++++----------- Cross.toml | 4 +- Dockerfile | 4 +- beacon_node/http_api/src/test_utils.rs | 11 +- beacon_node/http_metrics/src/metrics.rs | 8 +- beacon_node/lighthouse_network/Cargo.toml | 8 +- beacon_node/lighthouse_network/src/config.rs | 25 +- .../lighthouse_network/src/discovery/enr.rs | 4 +- .../src/discovery/enr_ext.rs | 100 +- .../lighthouse_network/src/discovery/mod.rs | 88 +- .../src/peer_manager/mod.rs | 3 +- .../src/peer_manager/network_behaviour.rs | 74 +- .../lighthouse_network/src/rpc/handler.rs | 417 +-- beacon_node/lighthouse_network/src/rpc/mod.rs | 141 +- .../lighthouse_network/src/rpc/protocol.rs | 14 +- .../src/rpc/self_limiter.rs | 4 +- .../src/service/api_types.rs | 2 +- .../src/service/behaviour.rs | 24 +- .../service/gossipsub_scoring_parameters.rs | 3 +- .../lighthouse_network/src/service/mod.rs | 170 +- .../lighthouse_network/src/service/utils.rs | 102 +- .../lighthouse_network/src/types/pubsub.rs | 10 +- .../lighthouse_network/tests/common.rs | 4 +- beacon_node/network/src/service.rs | 6 +- book/src/installation-source.md | 13 +- book/src/pi.md | 2 +- book/src/setup.md | 2 - boot_node/src/config.rs | 2 +- common/eth2_network_config/Cargo.toml | 2 +- lcli/Dockerfile | 4 +- scripts/cross/Dockerfile | 14 - testing/antithesis/Dockerfile.libvoidstar | 4 +- 35 files changed, 1615 insertions(+), 2429 deletions(-) delete mode 100644 scripts/cross/Dockerfile
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 9223c40e15d..ea4c1e24887 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -21,10 +21,6 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install geth (ubuntu) if: matrix.os == 'ubuntu-22.04' run: |
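A rough sketch of the "Lighthouse network" changes described in the commit message above, for orientation. This is illustrative only, not code from this PR: `ExampleBehaviour`, its field names, and the limit values are assumptions, and the `libp2p` `macros` and `tokio` features are presumed enabled. It shows connection limits and banning composed as behaviours, plus the new per-connection event buffer:

```rust
use libp2p::{
    allow_block_list::{self, BlockedPeers},
    connection_limits,
    core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport},
    identity,
    swarm::{NetworkBehaviour, Swarm, SwarmBuilder},
    PeerId, Transport,
};

// In libp2p 0.52, connection limits and peer banning are ordinary behaviours
// composed into the node's top-level behaviour, not swarm-level features.
#[derive(NetworkBehaviour)]
struct ExampleBehaviour {
    limits: connection_limits::Behaviour,
    banned: allow_block_list::Behaviour<BlockedPeers>,
}

fn build_swarm() -> Swarm<ExampleBehaviour> {
    let keypair = identity::Keypair::generate_ed25519();
    let local_peer_id = PeerId::from(keypair.public());

    // Purely count-based limits; the "smart connection limits" item in the
    // commit message is about eventually deciding per peer instead.
    let limits = connection_limits::ConnectionLimits::default()
        .with_max_pending_incoming(Some(16))
        .with_max_established_incoming(Some(64));

    let mut behaviour = ExampleBehaviour {
        limits: connection_limits::Behaviour::new(limits),
        banned: allow_block_list::Behaviour::default(),
    };

    // Banning is now a behaviour-level operation; the swarm itself no longer
    // tracks banned peers.
    behaviour.banned.block_peer(PeerId::random());

    // Stand-in transport so the sketch stays self-contained; a real node
    // wires up tcp/noise/yamux here.
    let transport = DummyTransport::<(PeerId, StreamMuxerBox)>::new().boxed();

    // The old global `connection_event_buffer_size` is replaced by a
    // per-connection buffer (this PR uses 4).
    SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id)
        .per_connection_event_buffer_size(4)
        .build()
}
```

The design point is that admission control (limits, bans) becomes just another `NetworkBehaviour`, which is also what should enable the early rejection of dials and incoming connections listed under "Additional Info".
diff --git 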
a/.github/workflows/release.yml b/.github/workflows/release.yml index 81421844157..30e4211b88c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -79,15 +79,6 @@ jobs: if: startsWith(matrix.arch, 'x86_64-windows') run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - # ============================== - # Windows & Mac dependencies - # ============================== - - name: Install Protoc - if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows') - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - # ============================== # Builds # ============================== diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index ff7a9cf2f10..ab31b3a92bc 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -60,10 +60,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in release @@ -83,7 +79,7 @@ jobs: node-version: '14' - name: Install windows build tools run: | - choco install python protoc visualstudio2019-workload-vctools -y + choco install python visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 @@ -108,10 +104,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run beacon_chain tests for all known forks run: make test-beacon-chain op-pool-tests: @@ -122,10 +114,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: @@ -148,10 +136,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in debug @@ -164,10 +148,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run state_transition_vectors in release. 
run: make run-state-transition-tests ef-tests-ubuntu: @@ -180,10 +160,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run consensus-spec-tests with blst, milagro and fake_crypto run: make test-ef dockerfile-ubuntu: @@ -206,10 +182,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim that starts from an eth1 contract @@ -222,10 +194,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim and go through the merge transition @@ -238,10 +206,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim syncing-simulator-ubuntu: @@ -252,10 +216,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the syncing simulator @@ -268,10 +228,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install geth run: | sudo add-apt-repository -y ppa:ethereum/ethereum @@ -303,10 +259,6 @@ jobs: dotnet-version: '6.0.201' - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release run: make test-exec-engine check-benchmarks: @@ -317,10 +269,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it run: make check-benches clippy: @@ -331,10 +279,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness @@ -347,10 +291,6 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust @ MSRV (${{ 
needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run cargo check run: cargo check --workspace arbitrary-check: @@ -389,10 +329,6 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps run: cargo install cargo-udeps --locked --force - name: Create Cargo config dir @@ -410,7 +346,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install dependencies - run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler + run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang - name: Use Rust beta run: rustup override set beta - name: Run make diff --git a/Cargo.lock b/Cargo.lock index a7e83112310..b2e8188eec4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] @@ -79,15 +79,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array", -] - [[package]] name = "aead" version = "0.4.3" @@ -95,28 +86,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ "generic-array", - "rand_core 0.6.4", -] - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher 0.2.5", ] [[package]] @@ -126,78 +95,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", - "ctr 0.8.0", + "ctr", "opaque-debug", ] -[[package]] -name = "aes" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" -dependencies = [ - "cfg-if", - "cipher 0.4.4", - "cpufeatures", -] - [[package]] name = "aes-gcm" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.8.0", - 
"ghash 0.4.4", - "subtle", -] - -[[package]] -name = "aes-gcm" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" -dependencies = [ - "aead 0.5.2", - "aes 0.8.2", - "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - [[package]] name = "ahash" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -215,18 +139,30 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "amcl" version = "0.3.0" source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -256,9 +192,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" [[package]] name = "arbitrary" @@ -276,83 +212,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] -name = "arrayref" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - -[[package]] -name = "arrayvec" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "asn1-rs" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" -dependencies = [ - "asn1-rs-derive 0.1.0", - "asn1-rs-impl", - "displaydoc", - "nom 7.1.3", - "num-traits", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - -[[package]] -name = "asn1-rs" -version = "0.5.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" -dependencies = [ - "asn1-rs-derive 0.4.0", - "asn1-rs-impl", - "displaydoc", - "nom 7.1.3", - "num-traits", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - -[[package]] -name = "asn1-rs-derive" -version = "0.1.0" +name = "array-init" +version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" +checksum = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", + "nodrop", ] [[package]] -name = "asn1-rs-derive" -version = "0.4.0" +name = "arrayref" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] -name = "asn1-rs-impl" -version = "0.1.0" +name = "arrayvec" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "asn1_der" @@ -374,7 +252,7 @@ dependencies = [ "log", "parking", "polling", - "rustix", + "rustix 0.37.23", "slab", "socket2 0.4.9", "waker-fn", @@ -397,7 +275,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] [[package]] @@ -408,18 +286,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -435,23 +313,17 @@ dependencies = [ [[package]] name = "asynchronous-codec" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" +checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" dependencies = [ "bytes", "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] -[[package]] -name = "atomic-waker" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" - [[package]] name = "attohttpc" version = "0.16.3" @@ -504,13 +376,13 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c" 
dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.2", "bytes", "futures-util", "http", @@ -521,7 +393,7 @@ dependencies = [ "memchr", "mime", "percent-encoding", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "rustversion", "serde", "serde_json", @@ -553,15 +425,15 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", "cfg-if", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object", "rustc-demangle", ] @@ -592,9 +464,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -663,7 +535,7 @@ dependencies = [ "slog", "sloggers", "slot_clock", - "smallvec", + "smallvec 1.11.0", "ssz_types", "state_processing", "store", @@ -752,7 +624,7 @@ version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -771,6 +643,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" + [[package]] name = "bitvec" version = "0.20.4" @@ -823,16 +701,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding", - "cipher 0.2.5", -] - [[package]] name = "block-padding" version = "0.2.1" @@ -914,6 +782,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +[[package]] +name = "bs58" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "tinyvec", +] + [[package]] name = "buf_redux" version = "0.8.4" @@ -938,9 +815,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.2" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -994,25 +871,25 @@ dependencies = [ "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", - "smallvec", + "smallvec 1.11.0", "ssz_types", "tree_hash", ] [[package]] name = "camino" -version = "1.1.4" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" dependencies = [ "serde", ] @@ -1025,7 +902,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.17", + "semver 1.0.18", "serde", "serde_json", "thiserror", @@ -1043,17 +920,6 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" -[[package]] -name = "ccm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" -dependencies = [ - "aead 0.3.2", - "cipher 0.2.5", - "subtle", -] - [[package]] name = "cexpr" version = "0.6.0" @@ -1076,7 +942,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", "zeroize", ] @@ -1087,22 +953,22 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ - "aead 0.4.3", + "aead", "chacha20", - "cipher 0.3.0", + "cipher", "poly1305", "zeroize", ] [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "serde", "time 0.1.45", @@ -1110,15 +976,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - [[package]] name = "cipher" version = "0.3.0" @@ -1128,16 +985,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - [[package]] name = "clang-sys" version = "1.6.1" @@ -1157,7 +1004,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -1217,7 +1064,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.21", + "time 0.3.24", "timer", "tokio", "types", @@ -1258,9 +1105,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" [[package]] 
name = "convert_case" @@ -1295,28 +1142,13 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] -[[package]] -name = "crc" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" - [[package]] name = "crc32fast" version = "1.3.2" @@ -1385,22 +1217,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", - "memoffset 0.8.0", + "memoffset 0.9.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -1442,7 +1274,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core 0.6.4", "typenum", ] @@ -1468,9 +1299,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" +checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" dependencies = [ "csv-core", "itoa", @@ -1493,26 +1324,17 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher 0.3.0", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] name = "ctrlc" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04d778600249295e82b6ab12e291ed9029407efee0cfb7baf67157edc65964df" +checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix 0.26.2", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -1530,9 +1352,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.2" +version = "4.0.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" +checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" dependencies = [ "cfg-if", "digest 0.10.7", @@ -1549,18 +1371,8 @@ version = "0.13.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", + "darling_core", + "darling_macro", ] [[package]] @@ -1577,38 +1389,13 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", + "darling_core", "quote", "syn 1.0.109", ] @@ -1702,7 +1489,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", "tree_hash", "types", ] @@ -1714,47 +1501,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", - "pem-rfc7468", "zeroize", ] [[package]] name = "der" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17" +checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" dependencies = [ "const-oid", + "pem-rfc7468", "zeroize", ] [[package]] -name = "der-parser" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" -dependencies = [ - "asn1-rs 0.3.1", - "displaydoc", - "nom 7.1.3", - "num-bigint", - "num-traits", - "rusticata-macros", -] - -[[package]] -name = "der-parser" -version = "8.2.0" +name = "deranged" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs 0.5.2", - "displaydoc", - "nom 7.1.3", - "num-bigint", - "num-traits", - "rusticata-macros", -] +checksum = "8810e7e2cf385b1e9b50d68264908ec367ba642c96d02edfe61c39e88e2a3c01" [[package]] name = "derivative" @@ -1775,60 +1540,29 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", -] - -[[package]] -name = "derive_builder" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" -dependencies = [ - "derive_builder_macro", + "syn 2.0.28", ] [[package]] -name = "derive_builder_core" -version = "0.11.2" +name = "derive_more" +version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "darling 0.14.4", + "convert_case", "proc-macro2", "quote", + "rustc_version 0.4.0", "syn 1.0.109", ] [[package]] -name = "derive_builder_macro" -version = "0.11.2" +name = "diesel" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" +checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" dependencies = [ - "derive_builder_core", - "syn 1.0.109", -] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "convert_case", - "proc-macro2", - "quote", - "rustc_version 0.4.0", - "syn 1.0.109", -] - -[[package]] -name = "diesel" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72eb77396836a4505da85bae0712fa324b74acfe1876d7c2f7e694ef3d0ee373" -dependencies = [ - "bitflags", + "bitflags 2.3.3", "byteorder", "diesel_derives", "itoa", @@ -1838,27 +1572,36 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4" +checksum = "74398b79d81e52e130d991afeed9c86034bb1b7735f46d2f5bf7deb261d80303" dependencies = [ - "proc-macro-error", + "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] name = "diesel_migrations" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" +checksum = "6036b3f0120c5961381b570ee20a02432d7e2d27ea60de9578799cf9156914ac" dependencies = [ "diesel", "migrations_internals", "migrations_macros", ] +[[package]] +name = "diesel_table_macro_syntax" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +dependencies = [ + "syn 2.0.28", +] + [[package]] name = "digest" version = "0.9.0" @@ -1932,28 +1675,29 @@ dependencies = [ [[package]] name = "discv5" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f32d27968ba86689e3f0eccba0383414348a6fc5918b0a639c98dd81e20ed6" +checksum = "98c05fa26996c6141f78ac4fafbe297a7fa69690565ba4e0d1f2e60bde5ce501" dependencies = [ - "aes 0.7.5", - "aes-gcm 0.9.4", + "aes", + "aes-gcm", "arrayvec", "delay_map", - "enr 0.8.1", + "enr 0.9.0", "fnv", "futures", "hashlink 0.7.0", "hex", "hkdf", "lazy_static", - "libp2p-core 0.36.0", + "libp2p-core", + "libp2p-identity", "lru 0.7.8", "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", "rlp", - "smallvec", + "smallvec 1.11.0", "socket2 0.4.9", "tokio", "tracing", @@ -1962,22 +1706,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "displaydoc" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.16", -] - [[package]] name = "dtoa" -version = "1.0.6" +version = "1.0.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dunce" @@ -1999,11 +1732,11 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428" +checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.6", + "der 0.7.7", "digest 0.10.7", "elliptic-curve 0.13.5", "rfc6979 0.4.0", @@ -2046,15 +1779,15 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0-rc.2" +version = "2.0.0-pre.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "798f704d128510932661a3489b08e3f4c934a01d61c5def59ae7b8e48f19665a" +checksum = "7bd577ba9d4bcab443cac60003d8fd32c638e7024a3ec92c200d7af5d2c397ed" dependencies = [ - "curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek 4.0.0-rc.1", "ed25519 2.2.1", "rand_core 0.6.4", "serde", - "sha2 0.10.6", + "sha2 0.10.7", "zeroize", ] @@ -2091,9 +1824,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" @@ -2108,8 +1841,6 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "hkdf", - "pem-rfc7468", "pkcs8 0.9.0", "rand_core 0.6.4", "sec1 0.3.0", @@ -2129,9 +1860,10 @@ dependencies = [ "ff 0.13.0", "generic-array", "group 0.13.0", + "pem-rfc7468", "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1 0.7.2", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -2152,7 +1884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" dependencies = [ "base64 0.13.1", - "bs58", + "bs58 0.4.0", "bytes", "hex", "k256 0.11.6", @@ -2166,19 +1898,20 @@ dependencies = [ [[package]] name = "enr" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116" +checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "bytes", - "ed25519-dalek 2.0.0-rc.2", + "ed25519-dalek 2.0.0-pre.0", "hex", "k256 0.13.1", "log", "rand 0.8.5", "rlp", "serde", + "serde-hex", "sha3 0.10.8", "zeroize", ] @@ -2240,15 +1973,21 @@ dependencies = [ "types", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "errno" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -2379,7 +2118,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.10.6", + "sha2 0.10.7", "zeroize", ] @@ -2387,7 +2126,7 @@ dependencies = [ name = "eth2_keystore" 
version = "0.1.0" dependencies = [ - "aes 0.7.5", + "aes", "bls", "eth2_key_derivation", "hex", @@ -2401,7 +2140,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid 0.8.2", + "uuid", "zeroize", ] @@ -2431,7 +2170,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid 0.8.2", + "uuid", ] [[package]] @@ -2510,7 +2249,7 @@ source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380 dependencies = [ "async-stream", "blst", - "bs58", + "bs58 0.4.0", "enr 0.6.2", "hex", "integer-sqrt", @@ -2566,7 +2305,7 @@ dependencies = [ "cpufeatures", "lazy_static", "ring", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -2584,22 +2323,22 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32749e96305376af40d7a7ee8ea4c4c64c68d09ff94a81ab78c8d9bc7153c221" +checksum = "e61ffea29f26e8249d35128a82ec8d3bd4fbc80179ea5f5e5e3daafef6a80fcb" dependencies = [ "ethereum-types 0.14.1", "itertools", - "smallvec", + "smallvec 1.11.0", ] [[package]] name = "ethereum_ssz_derive" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9cac7ef2107926cea34c0064056f9bb134d2085eef882388d151d2e59174cf0" +checksum = "6085d7fd3cf84bd2b8fec150d54c8467fb491d8db9c460607c5534f653a0ee38" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -2635,7 +2374,7 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "getrandom 0.2.9", + "getrandom 0.2.10", "hex", "proc-macro2", "quote", @@ -2644,7 +2383,7 @@ dependencies = [ "serde", "serde_json", "syn 1.0.109", - "toml", + "toml 0.5.11", "url", "walkdir", ] @@ -2707,7 +2446,7 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.9", + "getrandom 0.2.10", "hashers", "hex", "http", @@ -2851,6 +2590,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "ff" version = "0.12.1" @@ -2885,11 +2630,11 @@ checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "field-offset" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3cf3a800ff6e860c863ca6d4b16fd999db8b752819c1606884047b73e468535" +checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ - "memoffset 0.8.0", + "memoffset 0.9.0", "rustc_version 0.4.0", ] @@ -2926,12 +2671,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - [[package]] name = "flate2" version = "1.0.26" @@ -2940,7 +2679,7 @@ checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.7.1", + "miniz_oxide", ] [[package]] @@ -2981,9 +2720,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = 
[ "percent-encoding", ] @@ -3065,12 +2804,12 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "waker-fn", ] @@ -3082,7 +2821,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -3108,6 +2847,17 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +[[package]] +name = "futures-ticker" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" +dependencies = [ + "futures", + "futures-timer", + "instant", +] + [[package]] name = "futures-timer" version = "3.0.2" @@ -3127,7 +2877,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "pin-utils", "slab", ] @@ -3188,9 +2938,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "js-sys", @@ -3206,24 +2956,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval 0.5.3", -] - -[[package]] -name = "ghash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" -dependencies = [ - "opaque-debug", - "polyval 0.6.0", + "polyval", ] [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "git-version" @@ -3277,9 +3017,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -3287,7 +3027,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util 0.7.8", @@ -3342,6 +3082,16 @@ dependencies = [ "ahash 0.8.3", ] +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash 0.8.3", + "allocator-api2", +] + [[package]] name = "hashers" version = "1.0.1" @@ -3362,11 +3112,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" +checksum = 
"312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.0", ] [[package]] @@ -3376,7 +3126,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes", "headers-core", "http", @@ -3411,18 +3161,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -3515,7 +3256,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] [[package]] @@ -3523,7 +3264,7 @@ name = "http_api" version = "0.1.0" dependencies = [ "beacon_chain", - "bs58", + "bs58 0.4.0", "bytes", "directory", "environment", @@ -3606,9 +3347,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -3620,7 +3361,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "socket2 0.4.9", "tokio", "tower-service", @@ -3630,15 +3371,16 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", - "rustls 0.21.1", + "rustls 0.21.5", "tokio", - "tokio-rustls 0.24.0", + "tokio-rustls 0.24.1", ] [[package]] @@ -3656,9 +3398,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3696,9 +3438,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3782,7 +3524,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.5.0", + "parity-scale-codec 3.6.4", ] [[package]] @@ -3840,12 +3582,13 @@ 
dependencies = [ ] [[package]] -name = "inout" -version = "0.1.3" +name = "indexmap" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ - "generic-array", + "equivalent", + "hashbrown 0.14.0", ] [[package]] @@ -3878,53 +3621,34 @@ dependencies = [ "num-traits", ] -[[package]] -name = "interceptor" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" -dependencies = [ - "async-trait", - "bytes", - "log", - "rand 0.8.5", - "rtcp", - "rtp", - "thiserror", - "tokio", - "waitgroup", - "webrtc-srtp", - "webrtc-util", -] - [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.2", "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] name = "ipconfig" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.4.9", - "widestring 0.5.1", - "winapi", - "winreg", + "socket2 0.5.3", + "widestring 1.0.2", + "windows-sys", + "winreg 0.50.0", ] [[package]] name = "ipnet" -version = "2.7.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "itertools" @@ -3937,15 +3661,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jemalloc-ctl" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1891c671f3db85d8ea8525dd43ab147f9977041911d24a03e5a36187a7bfde9" +checksum = "7cffc705424a344c054e135d12ee591402f4539245e8bbd64e6c9eaa9458b63c" dependencies = [ "jemalloc-sys", "libc", @@ -3954,9 +3678,9 @@ dependencies = [ [[package]] name = "jemalloc-sys" -version = "0.5.3+5.3.0-patched" +version = "0.5.4+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9bd5d616ea7ed58b571b2e209a65759664d7fb021a0819d7a790afc67e47ca1" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" dependencies = [ "cc", "libc", @@ -3964,9 +3688,9 @@ dependencies = [ [[package]] name = "jemallocator" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16c2514137880c52b0b4822b563fadd38257c1f380858addb74a400889696ea6" +checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" dependencies = [ "jemalloc-sys", "libc", @@ -3974,9 +3698,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.63" +version = 
"0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -3987,7 +3711,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "pem", "ring", "serde", @@ -4004,7 +3728,7 @@ dependencies = [ "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.6", + "sha2 0.10.7", "sha3 0.10.8", ] @@ -4015,10 +3739,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", - "ecdsa 0.16.7", + "ecdsa 0.16.8", "elliptic-curve 0.13.5", "once_cell", - "sha2 0.10.6", + "sha2 0.10.7", "signature 2.1.0", ] @@ -4119,9 +3843,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.144" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libflate" @@ -4170,10 +3894,10 @@ name = "libmdbx" version = "0.1.4" source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "derive_more", - "indexmap", + "indexmap 1.9.3", "libc", "mdbx-sys", "parking_lot 0.12.1", @@ -4182,112 +3906,63 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.50.1" +version = "0.52.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7b0104790be871edcf97db9bd2356604984e623a08d825c3f27852290266b8" +checksum = "38039ba2df4f3255842050845daef4a004cc1f26da03dbc645535088b51910ef" dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.9", + "getrandom 0.2.10", "instant", - "libp2p-core 0.38.0", + "libp2p-allow-block-list", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", + "libp2p-identity", "libp2p-mdns", "libp2p-metrics", - "libp2p-mplex", "libp2p-noise", "libp2p-plaintext", - "libp2p-quic", "libp2p-swarm", "libp2p-tcp", - "libp2p-webrtc", "libp2p-websocket", "libp2p-yamux", - "multiaddr 0.16.0", - "parking_lot 0.12.1", + "multiaddr 0.18.0", "pin-project", - "smallvec", ] [[package]] -name = "libp2p-core" -version = "0.36.0" +name = "libp2p-allow-block-list" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38" +checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek 1.0.1", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "lazy_static", - "libsecp256k1", - "log", - "multiaddr 0.14.0", - "multihash 0.16.3", - "multistream-select 0.11.0", - "p256", - "parking_lot 0.12.1", - "pin-project", - "prost", - "prost-build", - "rand 0.8.5", - "rw-stream-sink", - "sha2 0.10.6", - "smallvec", - "thiserror", - "unsigned-varint 0.7.1", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", "void", - "zeroize", ] [[package]] -name = "libp2p-core" -version = "0.38.0" +name = "libp2p-connection-limits" +version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6a8fcd392ff67af6cc3f03b1426c41f7f26b6b9aff2dc632c1c56dd649e571f" +checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek 1.0.1", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libsecp256k1", - "log", - "multiaddr 0.16.0", - "multihash 0.16.3", - "multistream-select 0.12.1", - "once_cell", - "p256", - "parking_lot 0.12.1", - "pin-project", - "prost", - "prost-build", - "rand 0.8.5", - "rw-stream-sink", - "sec1 0.3.0", - "sha2 0.10.6", - "smallvec", - "thiserror", - "unsigned-varint 0.7.1", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", "void", - "zeroize", ] [[package]] name = "libp2p-core" -version = "0.39.2" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" +checksum = "ef7dd7b09e71aac9271c60031d0e558966cdb3253ba0308ab369bb2de80630d0" dependencies = [ "either", "fnv", @@ -4296,16 +3971,16 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.17.1", - "multihash 0.17.0", - "multistream-select 0.12.1", + "multiaddr 0.18.0", + "multihash 0.19.0", + "multistream-select", "once_cell", "parking_lot 0.12.1", "pin-project", "quick-protobuf", "rand 0.8.5", "rw-stream-sink", - "smallvec", + "smallvec 1.11.0", "thiserror", "unsigned-varint 0.7.1", "void", @@ -4313,102 +3988,111 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e42a271c1b49f789b92f7fc87749fa79ce5c7bdc88cbdfacb818a4bca47fec5" +checksum = "fd4394c81c0c06d7b4a60f3face7e8e8a9b246840f98d2c80508d0721b032147" dependencies = [ "futures", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", "parking_lot 0.12.1", - "smallvec", + "smallvec 1.11.0", "trust-dns-resolver", ] [[package]] name = "libp2p-gossipsub" -version = "0.43.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a173171c71c29bb156f98886c7c4824596de3903dadf01e2e79d2ccdcf38cd9f" +checksum = "8e378da62e8c9251f6e885ed173a561663f29b251e745586cf6ae6150b295c37" dependencies = [ "asynchronous-codec", - "base64 0.13.1", + "base64 0.21.2", "byteorder", "bytes", + "either", "fnv", "futures", + "futures-ticker", + "getrandom 0.2.10", "hex_fmt", "instant", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", "prometheus-client", - "prost", - "prost-build", - "prost-codec", + "quick-protobuf", + "quick-protobuf-codec", "rand 0.8.5", "regex", - "sha2 0.10.6", - "smallvec", - "thiserror", + "sha2 0.10.7", + "smallvec 1.11.0", "unsigned-varint 0.7.1", - "wasm-timer", + "void", ] [[package]] name = "libp2p-identify" -version = "0.41.1" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c052d0026f4817b44869bfb6810f4e1112f43aec8553f2cb38881c524b563abf" +checksum = "6a29675a32dbcc87790db6cf599709e64308f1ae9d5ecea2d259155889982db8" dependencies = [ "asynchronous-codec", + "either", "futures", "futures-timer", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", - "lru 0.8.1", - "prost", - "prost-build", - "prost-codec", - "smallvec", + "lru 0.10.1", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec 1.11.0", "thiserror", "void", ] [[package]] name = "libp2p-identity" -version = "0.1.2" +version 
= "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" +checksum = "a38d6012784fe4cc14e6d443eb415b11fc7c456dc15d9f0d90d9b70bc7ac3ec1" dependencies = [ - "bs58", + "asn1_der", + "bs58 0.5.0", "ed25519-dalek 1.0.1", + "libsecp256k1", "log", - "multiaddr 0.17.1", - "multihash 0.17.0", + "multihash 0.19.0", + "p256", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.6", + "sec1 0.7.3", + "sha2 0.10.7", "thiserror", + "void", "zeroize", ] [[package]] name = "libp2p-mdns" -version = "0.42.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f378264aade9872d6ccd315c0accc18be3a35d15fc1b9c36e5b6f983b62b5b" +checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" dependencies = [ "data-encoding", "futures", "if-watch", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", "rand 0.8.5", - "smallvec", - "socket2 0.4.9", + "smallvec 1.11.0", + "socket2 0.5.3", "tokio", "trust-dns-proto", "void", @@ -4416,224 +4100,143 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad8a64f29da86005c86a4d2728b8a0719e9b192f4092b609fd8790acb9dec55" +checksum = "3787ea81798dcc5bf1d8b40a8e8245cf894b168d04dd70aa48cb3ff2fff141d2" dependencies = [ - "libp2p-core 0.38.0", + "instant", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", + "libp2p-identity", "libp2p-swarm", + "once_cell", "prometheus-client", ] -[[package]] -name = "libp2p-mplex" -version = "0.38.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03805b44107aa013e7cbbfa5627b31c36cbedfdfb00603c0311998882bc4bace" -dependencies = [ - "asynchronous-codec", - "bytes", - "futures", - "libp2p-core 0.38.0", - "log", - "nohash-hasher", - "parking_lot 0.12.1", - "rand 0.8.5", - "smallvec", - "unsigned-varint 0.7.1", -] - [[package]] name = "libp2p-noise" -version = "0.41.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a978cb57efe82e892ec6f348a536bfbd9fee677adbe5689d7a93ad3a9bffbf2e" +checksum = "87945db2b3f977af09b62b9aa0a5f3e4870995a577ecd845cdeba94cdf6bbca7" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", + "multiaddr 0.18.0", + "multihash 0.19.0", "once_cell", - "prost", - "prost-build", + "quick-protobuf", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "snow", "static_assertions", "thiserror", - "x25519-dalek 1.1.1", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-plaintext" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c43ab37fb4102682ae9a248dc2e6a8e7b941ec75cf24aed103060a788e0fd15" +checksum = "37266c683a757df713f7dcda0cdcb5ad4681355ffa1b37b77c113c176a531195" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", - "prost", - "prost-build", + "quick-protobuf", "unsigned-varint 0.7.1", - "void", ] [[package]] -name = "libp2p-quic" -version = "0.7.0-alpha" +name = "libp2p-swarm" +version = "0.43.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e7c867e95c8130667b24409d236d37598270e6da69b3baf54213ba31ffca59" +checksum = "43106820057e0f65c77b01a3873593f66e676da4e40c70c3a809b239109f1d30" 
dependencies = [ - "bytes", + "either", + "fnv", "futures", "futures-timer", - "if-watch", - "libp2p-core 0.38.0", - "libp2p-tls", + "instant", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm-derive", "log", - "parking_lot 0.12.1", - "quinn-proto", + "multistream-select", + "once_cell", "rand 0.8.5", - "rustls 0.20.8", - "thiserror", + "smallvec 1.11.0", "tokio", + "void", ] [[package]] -name = "libp2p-swarm" -version = "0.41.1" +name = "libp2p-swarm-derive" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a35472fe3276b3855c00f1c032ea8413615e030256429ad5349cdf67c6e1a0" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libp2p-core 0.38.0", - "libp2p-swarm-derive", - "log", - "pin-project", - "rand 0.8.5", - "smallvec", - "thiserror", - "tokio", - "void", -] - -[[package]] -name = "libp2p-swarm-derive" -version = "0.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400" +checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ "heck", + "proc-macro-warning", + "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] name = "libp2p-tcp" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b257baf6df8f2df39678b86c578961d48cc8b68642a12f0f763f56c8e5858d" +checksum = "09bfdfb6f945c5c014b87872a0bdb6e0aef90e92f380ef57cd9013f118f9289d" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core 0.38.0", - "log", - "socket2 0.4.9", - "tokio", -] - -[[package]] -name = "libp2p-tls" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" -dependencies = [ - "futures", - "futures-rustls", - "libp2p-core 0.39.2", + "libp2p-core", "libp2p-identity", - "rcgen 0.10.0", - "ring", - "rustls 0.20.8", - "thiserror", - "webpki 0.22.0", - "x509-parser 0.14.0", - "yasna", -] - -[[package]] -name = "libp2p-webrtc" -version = "0.4.0-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb6cd86dd68cba72308ea05de1cebf3ba0ae6e187c40548167955d4e3970f6a" -dependencies = [ - "async-trait", - "asynchronous-codec", - "bytes", - "futures", - "futures-timer", - "hex", - "if-watch", - "libp2p-core 0.38.0", - "libp2p-noise", "log", - "multihash 0.16.3", - "prost", - "prost-build", - "prost-codec", - "rand 0.8.5", - "rcgen 0.9.3", - "serde", - "stun", - "thiserror", - "tinytemplate", + "socket2 0.5.3", "tokio", - "tokio-util 0.7.8", - "webrtc", ] [[package]] name = "libp2p-websocket" -version = "0.40.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d705506030d5c0aaf2882437c70dab437605f21c5f9811978f694e6917a3b54" +checksum = "956d981ebc84abc3377e5875483c06d94ff57bc6b25f725047f9fd52592f72d4" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.38.0", + "libp2p-core", + "libp2p-identity", "log", "parking_lot 0.12.1", "quicksink", "rw-stream-sink", "soketto", "url", - "webpki-roots", + "webpki-roots 0.23.1", ] [[package]] name = "libp2p-yamux" -version = "0.42.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f63594a0aa818642d9d4915c791945053877253f08a3626f13416b5cd928a29" +checksum = 
"c0a9b42ab6de15c6f076d8fb11dc5f48d899a10b55a2e16b12be9012a05287b0" dependencies = [ "futures", - "libp2p-core 0.38.0", + "libp2p-core", "log", - "parking_lot 0.12.1", "thiserror", "yamux", ] @@ -4699,9 +4302,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.9" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "pkg-config", @@ -4785,11 +4388,11 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.10.6", + "sha2 0.10.7", "slog", "slog-async", "slog-term", - "smallvec", + "smallvec 1.11.0", "snap", "ssz_types", "strum", @@ -4829,12 +4432,18 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" +[[package]] +name = "linux-raw-sys" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" + [[package]] name = "lmdb-rkv" version = "0.14.0" source = "git+https://github.com/sigp/lmdb-rs?rev=f33845c6469b94265319aac0ed5085597862c27e#f33845c6469b94265319aac0ed5085597862c27e" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "lmdb-rkv-sys", @@ -4852,9 +4461,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -4870,12 +4479,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "logging" @@ -4906,11 +4512,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.8.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" +checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" dependencies = [ - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -4968,7 +4574,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -4979,9 +4585,15 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.0" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" + +[[package]] +name = "maybe-uninit" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "md-5" @@ -5005,9 +4617,9 @@ dependencies = [ [[package]] name = "mediatype" 
-version = "0.19.13" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea6e62614ab2fc0faa58bb15102a0382d368f896a9fa4776592589ab55c4de7" +checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" @@ -5026,9 +4638,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg 1.1.0", ] @@ -5047,24 +4659,24 @@ dependencies = [ [[package]] name = "metastruct" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "734788dec2091fe9afa39530ca2ea7994f4a2c9aff3dbfebb63f2c1945c6f10b" +checksum = "ccfbb8826226b09b05bb62a0937cf6abb16f1f7d4b746eb95a83db14aec60f06" dependencies = [ "metastruct_macro", ] [[package]] name = "metastruct_macro" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ded15e7570c2a507a23e6c3a1c8d74507b779476e43afe93ddfc261d44173d" +checksum = "37cb4045d5677b7da537f8cb5d0730d5b6414e3cc81c61e4b50e1f0cbdc73909" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", - "smallvec", + "smallvec 1.11.0", "syn 1.0.109", ] @@ -5091,19 +4703,19 @@ dependencies = [ [[package]] name = "migrations_internals" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" +checksum = "0f23f71580015254b020e856feac3df5878c2c7a8812297edd6c0a485ac9dada" dependencies = [ "serde", - "toml", + "toml 0.7.6", ] [[package]] name = "migrations_macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" +checksum = "cce3325ac70e67bbab5bd837a31cae01f1a6db64e0e744a33cb03a543469ef08" dependencies = [ "migrations_internals", "proc-macro2", @@ -5144,15 +4756,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.7.1" @@ -5164,14 +4767,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] @@ -5207,27 +4809,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c580bfdd8803cce319b047d239559a22f809094aaea4ac13902a1fdcfcd4261" dependencies = [ "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash 0.16.3", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.1", - "url", -] - -[[package]] -name = "multiaddr" -version = 
"0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aebdb21e90f81d13ed01dc84123320838e53963c2ca94b60b305d3fa64f31e" -dependencies = [ - "arrayref", + "bs58 0.4.0", "byteorder", "data-encoding", - "multibase", "multihash 0.16.3", "percent-encoding", "serde", @@ -5238,16 +4822,16 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.17.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" +checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" dependencies = [ "arrayref", "byteorder", "data-encoding", - "log", + "libp2p-identity", "multibase", - "multihash 0.17.0", + "multihash 0.19.0", "percent-encoding", "serde", "static_assertions", @@ -5275,18 +4859,17 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive", - "sha2 0.10.6", + "sha2 0.10.7", "unsigned-varint 0.7.1", ] [[package]] name = "multihash" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +checksum = "2fd59dcc2bbe70baabeac52cd22ae52c55eefe6c38ff11a9439f16a350a939f2" dependencies = [ "core2", - "multihash-derive", "unsigned-varint 0.7.1", ] @@ -5304,12 +4887,6 @@ dependencies = [ "synstructure", ] -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - [[package]] name = "multipart" version = "0.18.0" @@ -5330,29 +4907,15 @@ dependencies = [ [[package]] name = "multistream-select" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "363a84be6453a70e63513660f4894ef815daf88e3356bffcda9ca27d810ce83b" -dependencies = [ - "bytes", - "futures", - "log", - "pin-project", - "smallvec", - "unsigned-varint 0.7.1", -] - -[[package]] -name = "multistream-select" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" dependencies = [ "bytes", "futures", "log", "pin-project", - "smallvec", + "smallvec 1.11.0", "unsigned-varint 0.7.1", ] @@ -5393,7 +4956,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" dependencies = [ "anyhow", - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "netlink-packet-core", @@ -5477,7 +5040,7 @@ dependencies = [ "slog-term", "sloggers", "slot_clock", - "smallvec", + "smallvec 1.11.0", "ssz_types", "store", "strum", @@ -5494,7 +5057,7 @@ version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cc", "cfg-if", "libc", @@ -5507,10 +5070,9 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.6.5", ] [[package]] @@ -5519,7 +5081,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "static_assertions", @@ -5541,6 +5103,12 @@ dependencies = [ "validator_dir", ] +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5608,7 +5176,7 @@ dependencies = [ "num-traits", "rand 0.7.3", "serde", - "smallvec", + "smallvec 1.11.0", "zeroize", ] @@ -5635,20 +5203,20 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg 1.1.0", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi 0.3.2", "libc", ] @@ -5663,36 +5231,18 @@ dependencies = [ [[package]] name = "object" -version = "0.30.3" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" -dependencies = [ - "asn1-rs 0.3.1", -] - -[[package]] -name = "oid-registry" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" -dependencies = [ - "asn1-rs 0.5.2", -] - [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oneshot_broadcast" @@ -5744,7 +5294,7 @@ version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "foreign-types", "libc", @@ -5761,7 +5311,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -5772,9 +5322,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.3+1.1.1t" +version = "111.26.0+1.1.1u" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "924757a6a226bf60da5f7dd0311a34d2b52283dd82ddeb103208ddc66362f80c" +checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" dependencies = [ "cc", ] @@ -5824,24 +5374,14 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p256" -version = "0.11.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" -dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.6", -] - -[[package]] -name = "p384" -version = "0.11.2" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.6", + "ecdsa 0.16.8", + "elliptic-curve 0.13.5", + "primeorder", + "sha2 0.10.7", ] [[package]] @@ -5870,15 +5410,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.5.0" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.1.4", + "parity-scale-codec-derive 3.6.4", "serde", ] @@ -5896,9 +5436,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.4" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5930,7 +5470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -5943,28 +5483,28 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec", + "smallvec 1.11.0", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", - "smallvec", - "windows-sys 0.45.0", + "redox_syscall 0.3.5", + "smallvec 1.11.0", + "windows-targets", ] [[package]] name = "paste" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -6001,28 +5541,18 @@ dependencies = [ [[package]] name = "pem-rfc7468" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" dependencies = [ "base64ct", ] [[package]] name = "percent-encoding" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" - -[[package]] -name = "petgraph" -version = "0.6.3" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" -dependencies = [ - "fixedbitset", - "indexmap", -] +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pharos" @@ -6036,40 +5566,40 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" dependencies = [ "phf_shared", ] [[package]] name = "phf_shared" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -6080,9 +5610,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" @@ -6106,7 +5636,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.6", + "der 0.7.7", "spki 0.7.2", ] @@ -6130,9 +5660,9 @@ checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" [[package]] name = "plotters" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" dependencies = [ "num-traits", "plotters-backend", @@ -6143,15 +5673,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" dependencies = [ "plotters-backend", ] @@ -6163,13 +5693,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", - "bitflags", + "bitflags 1.3.2", "cfg-if", "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.9", - "windows-sys 0.48.0", + "pin-project-lite 0.2.10", + "windows-sys", ] [[package]] @@ -6180,7 +5710,7 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash 0.4.1", + "universal-hash", ] [[package]] @@ -6192,19 +5722,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.4.1", -] - -[[package]] -name = "polyval" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -6213,7 +5731,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "byteorder", "bytes", "fallible-iterator", @@ -6221,7 +5739,7 @@ dependencies = [ "md-5", "memchr", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "stringprep", ] @@ -6260,13 +5778,12 @@ dependencies = [ ] [[package]] -name = "prettyplease" -version = "0.1.25" +name = "primeorder" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" dependencies = [ - "proc-macro2", - "syn 1.0.109", + "elliptic-curve 0.13.5", ] [[package]] @@ -6303,7 +5820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ "thiserror", - "toml", + "toml 0.5.11", ] [[package]] @@ -6336,11 +5853,22 @@ version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" +[[package]] +name = "proc-macro-warning" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] + [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -6374,21 +5902,21 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.18.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" +checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", "parking_lot 0.12.1", - "prometheus-client-derive-text-encode", + "prometheus-client-derive-encode", ] [[package]] -name = "prometheus-client-derive-text-encode" -version = "0.3.0" +name = "prometheus-client-derive-encode" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" +checksum = "72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" dependencies = [ "proc-macro2", "quote", @@ -6396,97 +5924,30 @@ dependencies = [ ] [[package]] -name = "prost" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +name = "proto_array" +version = "0.2.0" dependencies = [ - "bytes", - "prost-derive", + "ethereum_ssz", + "ethereum_ssz_derive", + "safe_arith", + "serde", + "serde_derive", + "serde_yaml", + "superstruct 0.5.0", + "types", ] [[package]] -name = "prost-build" -version = "0.11.9" +name = "protobuf" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" -dependencies = [ - "bytes", - "heck", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 1.0.109", - "tempfile", - "which", -] +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] -name = "prost-codec" -version = "0.3.0" +name = "psutil" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc34979ff898b6e141106178981ce2596c387ea6e62533facfc61a37fc879c0" -dependencies = [ - "asynchronous-codec", - "bytes", - "prost", - "thiserror", - "unsigned-varint 0.7.1", -] - -[[package]] -name = "prost-derive" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" -dependencies = [ - "prost", -] - -[[package]] -name = "proto_array" -version = "0.2.0" -dependencies = [ - "ethereum_ssz", - "ethereum_ssz_derive", - "safe_arith", - "serde", - "serde_derive", - "serde_yaml", - "superstruct 0.5.0", - "types", -] - -[[package]] -name = "protobuf" -version = "2.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" - -[[package]] -name = "psutil" -version = "3.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f866af2b0f8e4b0d2d00aad8a9c5fc48fad33466cd99a64cbb3a4c1505f1a62d" +checksum = "f866af2b0f8e4b0d2d00aad8a9c5fc48fad33466cd99a64cbb3a4c1505f1a62d" dependencies = [ "cfg-if", "darwin-libproc", @@ -6516,6 +5977,19 @@ dependencies = [ "byteorder", ] +[[package]] +name = "quick-protobuf-codec" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +dependencies = [ + "asynchronous-codec", + "bytes", + "quick-protobuf", + "thiserror", + "unsigned-varint 0.7.1", +] + [[package]] name = "quickcheck" version = "0.9.2" @@ -6550,29 +6024,11 @@ dependencies = [ "pin-project-lite 0.1.12", ] -[[package]] -name = "quinn-proto" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" -dependencies = [ - "bytes", - "rand 0.8.5", - "ring", - "rustc-hash", 
- "rustls 0.20.8", - "slab", - "thiserror", - "tinyvec", - "tracing", - "webpki 0.22.0", -] - [[package]] name = "quote" -version = "1.0.27" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2", ] @@ -6669,7 +6125,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -6712,38 +6168,13 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "rcgen" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" -dependencies = [ - "pem", - "ring", - "time 0.3.21", - "x509-parser 0.13.2", - "yasna", -] - -[[package]] -name = "rcgen" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" -dependencies = [ - "pem", - "ring", - "time 0.3.21", - "yasna", -] - [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -6752,7 +6183,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -6761,20 +6192,21 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.1", + "regex-automata 0.3.4", + "regex-syntax 0.7.4", ] [[package]] @@ -6786,6 +6218,17 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.7.4", +] + [[package]] name = "regex-syntax" version = "0.6.29" @@ -6794,9 +6237,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "reqwest" @@ -6804,7 +6247,7 @@ version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", 
"bytes", "encoding_rs", "futures-core", @@ -6822,15 +6265,15 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.9", - "rustls 0.21.1", + "pin-project-lite 0.2.10", + "rustls 0.21.5", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls 0.24.0", + "tokio-rustls 0.24.1", "tokio-util 0.7.8", "tower-service", "url", @@ -6838,8 +6281,8 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", - "winreg", + "webpki-roots 0.22.6", + "winreg 0.10.1", ] [[package]] @@ -6925,17 +6368,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rtcp" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" -dependencies = [ - "bytes", - "thiserror", - "webrtc-util", -] - [[package]] name = "rtnetlink" version = "0.10.1" @@ -6951,32 +6383,18 @@ dependencies = [ "tokio", ] -[[package]] -name = "rtp" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" -dependencies = [ - "async-trait", - "bytes", - "rand 0.8.5", - "serde", - "thiserror", - "webrtc-util", -] - [[package]] name = "rusqlite" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.2", + "hashlink 0.8.3", "libsqlite3-sys", - "smallvec", + "smallvec 1.11.0", ] [[package]] @@ -7012,30 +6430,34 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.17", + "semver 1.0.18", ] [[package]] -name = "rusticata-macros" -version = "4.1.0" +name = "rustix" +version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ - "nom 7.1.3", + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys", ] [[package]] name = "rustix" -version = "0.37.19" +version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" dependencies = [ - "bitflags", + "bitflags 2.3.3", "errno", - "io-lifetimes", "libc", - "linux-raw-sys", - "windows-sys 0.48.0", + "linux-raw-sys 0.4.5", + "windows-sys", ] [[package]] @@ -7065,23 +6487,23 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.1" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.2", "sct 0.7.0", ] [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = 
"2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", ] [[package]] @@ -7094,17 +6516,27 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.101.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustversion" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rw-stream-sink" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" dependencies = [ "futures", "pin-project", @@ -7113,9 +6545,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "safe_arith" @@ -7133,7 +6565,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "cipher 0.3.0", + "cipher", ] [[package]] @@ -7147,21 +6579,21 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.7.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" +checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.5.0", + "parity-scale-codec 3.6.4", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.6.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53012eae69e5aa5c14671942a5dd47de59d4cdcff8532a6dd0e081faf1119482" +checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -7171,11 +6603,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -7195,9 +6627,9 @@ checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scrypt" @@ -7231,18 +6663,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "sdp" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" -dependencies = [ - "rand 0.8.5", - "substring", - "thiserror", - "url", -] - [[package]] name = "sec1" version = "0.3.0" @@ -7259,12 +6679,12 @@ dependencies = [ [[package]] name = "sec1" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.6", + "der 0.7.7", "generic-array", "pkcs8 0.10.2", "subtle", @@ -7273,11 +6693,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -7286,9 +6706,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -7305,9 +6725,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" dependencies = [ "serde", ] @@ -7334,13 +6754,24 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.163" +version = "1.0.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" +checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-hex" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca37e3e4d1b39afd7ff11ee4e947efae85adfddf4841787bfa47c470e96dc26d" +dependencies = [ + "array-init", + "serde", + "smallvec 0.6.14", +] + [[package]] name = "serde_array_query" version = "0.1.0" @@ -7363,20 +6794,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.163" +version = "1.0.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "itoa", "ryu", @@ -7385,9 +6816,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55" +checksum = 
"4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ "itoa", "serde", @@ -7395,13 +6826,22 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" +checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", +] + +[[package]] +name = "serde_spanned" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +dependencies = [ + "serde", ] [[package]] @@ -7432,7 +6872,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -7444,7 +6884,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ - "indexmap", + "indexmap 1.9.3", "ryu", "serde", "yaml-rust", @@ -7500,9 +6940,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", @@ -7584,7 +7024,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.21", + "time 0.3.24", ] [[package]] @@ -7716,7 +7156,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.21", + "time 0.3.24", ] [[package]] @@ -7761,7 +7201,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.21", + "time 0.3.24", ] [[package]] @@ -7800,9 +7240,18 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.10.0" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "smallvec" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "snap" @@ -7812,18 +7261,18 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" +checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" dependencies = [ - "aes-gcm 0.9.4", + "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", - "sha2 0.10.6", + "sha2 0.10.7", "subtle", ] @@ -7844,7 +7293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -7855,7 +7304,6 @@ checksum = 
"41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "flate2", "futures", "httparse", "log", @@ -7886,7 +7334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.6", + "der 0.7.7", ] [[package]] @@ -7916,9 +7364,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e43767964a80b2fdeda7a79a57a2b6cbca966688d5b81da8fe91140a94f552a1" +checksum = "382939886cb24ee8ac885d09116a60f6262d827c7a9e36012b4f6d3d0116d0b3" dependencies = [ "arbitrary", "derivative", @@ -7927,7 +7375,7 @@ dependencies = [ "itertools", "serde", "serde_derive", - "smallvec", + "smallvec 1.11.0", "tree_hash", "typenum", ] @@ -7952,7 +7400,7 @@ dependencies = [ "merkle_proof", "rayon", "safe_arith", - "smallvec", + "smallvec 1.11.0", "ssz_types", "tokio", "tree_hash", @@ -8004,9 +7452,9 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -8046,34 +7494,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "stun" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" -dependencies = [ - "base64 0.13.1", - "crc", - "lazy_static", - "md-5", - "rand 0.8.5", - "ring", - "subtle", - "thiserror", - "tokio", - "url", - "webrtc-util", -] - -[[package]] -name = "substring" -version = "1.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ee6433ecef213b2e72f587ef64a2f5943e7cd16fbd82dbe8bc07486c534c86" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "subtle" version = "2.4.1" @@ -8086,11 +7506,11 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a99807a055ff4ff5d249bb84c80d9eabb55ca3c452187daae43fd5b51ef695" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", - "smallvec", + "smallvec 1.11.0", "syn 1.0.109", ] @@ -8100,11 +7520,11 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", - "smallvec", + "smallvec 1.11.0", "syn 1.0.109", ] @@ -8130,9 +7550,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.16" +version = "2.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" dependencies = [ "proc-macro2", "quote", @@ -8178,7 +7598,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] @@ -8246,15 +7666,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.5.0" +version = "3.7.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" dependencies = [ "cfg-if", - "fastrand", + "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix", - "windows-sys 0.45.0", + "rustix 0.38.4", + "windows-sys", ] [[package]] @@ -8307,7 +7727,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -8321,22 +7741,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -8371,10 +7791,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "b79eabcd964882a646b3584543ccabeae7869e9ac32a46f6f22b7a5bd405308b" dependencies = [ + "deranged", "itoa", "libc", "num_threads", @@ -8391,9 +7812,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" dependencies = [ "time-core", ] @@ -8464,21 +7885,22 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.1" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg 1.1.0", + "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "signal-hook-registry", "socket2 0.4.9", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -8487,7 +7909,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tokio", ] @@ -8499,7 +7921,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -8528,7 +7950,7 @@ dependencies = [ "parking_lot 0.12.1", "percent-encoding", "phf", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "postgres-protocol", "postgres-types", "socket2 0.5.3", @@ -8560,11 +7982,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.1", + "rustls 0.21.5", "tokio", ] @@ -8575,7 +7997,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tokio", "tokio-util 0.7.8", ] @@ -8606,7 +8028,7 @@ dependencies = [ "tokio-rustls 0.23.4", "tungstenite 0.17.3", "webpki 0.22.0", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] @@ -8620,7 +8042,7 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "slab", "tokio", ] @@ -8633,9 +8055,8 @@ checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", - "futures-io", "futures-sink", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "slab", "tokio", "tracing", @@ -8650,6 +8071,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -8659,7 +8114,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tokio", "tower-layer", "tower-service", @@ -8686,20 +8141,20 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] @@ -8744,7 +8199,7 @@ dependencies = [ "once_cell", "regex", "sharded-slab", - "smallvec", + "smallvec 1.11.0", "thread_local", "tracing", "tracing-core", @@ -8772,22 +8227,22 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8488e272d45adc36db8f6c99d09613f58a7cd06c7b347546c87d9a29ca11e8" +checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", - "smallvec", + "smallvec 1.11.0", ] [[package]] name = "tree_hash_derive" -version = "0.5.1" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "83baa26594d96889e5fef7638dfb0f41e16070301a5cf6da99b9a6a0804cec89" +checksum = "84303a9c7cda5f085a3ed9cd241d1e95e04d88aab1d679b02f212e653537ba86" dependencies = [ - "darling 0.13.4", + "darling", "quote", "syn 1.0.109", ] @@ -8819,7 +8274,7 @@ dependencies = [ "ipnet", "lazy_static", "rand 0.8.5", - "smallvec", + "smallvec 1.11.0", "socket2 0.4.9", "thiserror", "tinyvec", @@ -8841,7 +8296,7 @@ dependencies = [ "lru-cache", "parking_lot 0.12.1", "resolv-conf", - "smallvec", + "smallvec 1.11.0", "thiserror", "tokio", "tracing", @@ -8894,25 +8349,6 @@ dependencies = [ "webpki 0.22.0", ] -[[package]] -name = "turn" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" -dependencies = [ - "async-trait", - "base64 0.13.1", - "futures", - "log", - "md-5", - "rand 0.8.5", - "ring", - "stun", - "thiserror", - "tokio", - "webrtc-util", -] - [[package]] name = "twoway" version = "0.1.8" @@ -8967,7 +8403,7 @@ dependencies = [ "serde_with", "serde_yaml", "slog", - "smallvec", + "smallvec 1.11.0", "ssz_types", "state_processing", "strum", @@ -9016,9 +8452,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -9051,16 +8487,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "unsigned-varint" version = "0.6.0" @@ -9098,12 +8524,12 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", ] @@ -9119,19 +8545,10 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "serde", ] -[[package]] -name = "uuid" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" -dependencies = [ - "getrandom 0.2.9", -] - [[package]] name = "validator_client" version = "0.3.5" @@ -9235,15 +8652,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "waitgroup" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1f50000a783467e6c0200f9d10642f4bc424e39efc1b770203e88b488f79292" -dependencies = [ - "atomic-waker", -] - [[package]] name = "waker-fn" version = "1.1.0" @@ -9262,11 +8670,10 @@ dependencies = [ [[package]] name = "want" -version = "0.3.0" +version = 
"0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -9338,9 +8745,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9348,24 +8755,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -9375,9 +8782,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9385,22 +8792,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" @@ -9465,9 +8872,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -9530,211 +8937,12 @@ dependencies = [ ] [[package]] -name = "webrtc" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" -dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "hex", - "interceptor", - "lazy_static", - "log", - "rand 0.8.5", - "rcgen 0.9.3", - "regex", - "ring", - "rtcp", - "rtp", - "rustls 
0.19.1", - "sdp", - "serde", - "serde_json", - "sha2 0.10.6", - "stun", - "thiserror", - "time 0.3.21", - "tokio", - "turn", - "url", - "waitgroup", - "webrtc-data", - "webrtc-dtls", - "webrtc-ice", - "webrtc-mdns", - "webrtc-media", - "webrtc-sctp", - "webrtc-srtp", - "webrtc-util", -] - -[[package]] -name = "webrtc-data" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" -dependencies = [ - "bytes", - "derive_builder", - "log", - "thiserror", - "tokio", - "webrtc-sctp", - "webrtc-util", -] - -[[package]] -name = "webrtc-dtls" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267" -dependencies = [ - "aes 0.6.0", - "aes-gcm 0.10.2", - "async-trait", - "bincode", - "block-modes", - "byteorder", - "ccm", - "curve25519-dalek 3.2.0", - "der-parser 8.2.0", - "elliptic-curve 0.12.3", - "hkdf", - "hmac 0.12.1", - "log", - "p256", - "p384", - "rand 0.8.5", - "rand_core 0.6.4", - "rcgen 0.10.0", - "ring", - "rustls 0.19.1", - "sec1 0.3.0", - "serde", - "sha1", - "sha2 0.10.6", - "signature 1.6.4", - "subtle", - "thiserror", - "tokio", - "webpki 0.21.4", - "webrtc-util", - "x25519-dalek 2.0.0-rc.2", - "x509-parser 0.13.2", -] - -[[package]] -name = "webrtc-ice" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" -dependencies = [ - "arc-swap", - "async-trait", - "crc", - "log", - "rand 0.8.5", - "serde", - "serde_json", - "stun", - "thiserror", - "tokio", - "turn", - "url", - "uuid 1.3.3", - "waitgroup", - "webrtc-mdns", - "webrtc-util", -] - -[[package]] -name = "webrtc-mdns" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" -dependencies = [ - "log", - "socket2 0.4.9", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-media" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" -dependencies = [ - "byteorder", - "bytes", - "rand 0.8.5", - "rtp", - "thiserror", -] - -[[package]] -name = "webrtc-sctp" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" -dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "crc", - "log", - "rand 0.8.5", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-srtp" -version = "0.9.1" +name = "webpki-roots" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "aes-gcm 0.9.4", - "async-trait", - "byteorder", - "bytes", - "ctr 0.8.0", - "hmac 0.11.0", - "log", - "rtcp", - "rtp", - "sha-1 0.9.8", - "subtle", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-util" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" -dependencies = [ - "async-trait", - "bitflags", - "bytes", - "cc", - 
"ipnet", - "lazy_static", - "libc", - "log", - "nix 0.24.3", - "rand 0.8.5", - "thiserror", - "tokio", - "winapi", + "rustls-webpki 0.100.1", ] [[package]] @@ -9756,9 +8964,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" -version = "0.5.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "wildmatch" @@ -9816,7 +9024,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", + "windows-targets", ] [[package]] @@ -9831,75 +9039,30 @@ dependencies = [ "winapi", ] -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets", ] [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_gnullvm", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_gnullvm", "windows_x86_64_msvc 0.48.0", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" @@ -9912,12 +9075,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name = "windows_aarch64_msvc" version = "0.48.0" @@ -9930,12 +9087,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.0" @@ -9948,12 +9099,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.0" @@ -9966,24 +9111,12 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" @@ -9998,15 +9131,18 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.42.2" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] -name = "windows_x86_64_msvc" -version = "0.48.0" +name = "winnow" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "f46aab759304e4d7b2075a9aecba26228bb073ee8c50db796b2c72c676b5d807" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -10017,6 +9153,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys", +] + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -10062,60 +9208,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "x25519-dalek" -version = "2.0.0-rc.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" -dependencies = [ - "curve25519-dalek 4.0.0-rc.2", - "rand_core 0.6.4", - "serde", - "zeroize", -] - -[[package]] -name = "x509-parser" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" -dependencies = [ - "asn1-rs 0.3.1", - "base64 0.13.1", - "data-encoding", - "der-parser 7.0.0", - "lazy_static", - "nom 7.1.3", - "oid-registry 0.4.0", - "ring", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - -[[package]] -name = "x509-parser" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" -dependencies = [ - "asn1-rs 0.5.2", - "base64 0.13.1", - "data-encoding", - "der-parser 8.2.0", - "lazy_static", - "nom 7.1.3", - "oid-registry 0.6.1", - "rusticata-macros", - "thiserror", - "time 0.3.21", -] - [[package]] name = "xml-rs" -version = "0.8.11" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1690519550bfa95525229b9ca2350c63043a4857b3b0013811b2ccf4a2420b01" +checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1" [[package]] name = "xmltree" @@ -10149,15 +9246,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "yasna" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" -dependencies = [ - "time 0.3.21", -] - [[package]] name = "zeroize" version = "1.6.0" @@ -10175,7 +9263,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.28", ] [[package]] diff --git a/Cross.toml b/Cross.toml index 9c3e441cba5..d5f7a5d5068 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -dockerfile = './scripts/cross/Dockerfile' +pre-build = ["apt-get install -y cmake clang-3.9"] [target.aarch64-unknown-linux-gnu] -dockerfile = './scripts/cross/Dockerfile' +pre-build = ["apt-get install -y cmake clang-3.9"] diff --git a/Dockerfile b/Dockerfile index be01ad7c572..f07c42dd85e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES ARG PROFILE=release @@ -13,4 +13,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse +COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse \ No newline at end of file diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 9880a8ca617..694402a3d78 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -9,12 +9,9 @@ use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, - libp2p::{ - core::connection::ConnectionId, - swarm::{ - behaviour::{ConnectionEstablished, FromSwarm}, - NetworkBehaviour, - }, + libp2p::swarm::{ + behaviour::{ConnectionEstablished, FromSwarm}, + ConnectionId, NetworkBehaviour, }, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, @@ -170,7 +167,7 @@ pub async fn create_api_server_on_port( local_addr: EXTERNAL_ADDR.parse().unwrap(), send_back_addr: EXTERNAL_ADDR.parse().unwrap(), }; - let connection_id = ConnectionId::new(1); + let connection_id = ConnectionId::new_unchecked(1); pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, connection_id, diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 9b156942112..785206b757b 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,6 +1,6 @@ use crate::Context; use beacon_chain::BeaconChainTypes; -use lighthouse_metrics::{Encoder, TextEncoder}; +use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; @@ -9,7 +9,7 @@ pub use lighthouse_metrics::*; pub fn gather_prometheus_metrics( ctx: &Context, ) -> std::result::Result { - let mut buffer = vec![]; + let mut buffer = String::new(); let encoder = TextEncoder::new(); // There are two categories of metrics: @@ -50,7 +50,7 @@ pub fn gather_prometheus_metrics( } encoder - .encode(&lighthouse_metrics::gather(), &mut buffer) + .encode_utf8(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist if let Some(registry) = ctx.gossipsub_registry.as_ref() { @@ -59,5 +59,5 @@ pub fn gather_prometheus_metrics( } } - String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) + Ok(buffer) } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 6d056d83505..f71845fed25 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.3.0", features = ["libp2p"]} +discv5 = { version = "0.3.1", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } ssz_types = "0.5.3" @@ -40,15 +40,15 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.18.0" +prometheus-client = "0.21.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.3.0" void = "1" [dependencies.libp2p] 
-version = "0.50.0" +version = "0.52" default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] +features = ["websocket", "identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] [dev-dependencies] slog-term = "2.6.0" diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 94675264588..6c8f20a24b9 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -6,10 +6,7 @@ use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; use discv5::{Discv5Config, Discv5ConfigBuilder}; -use libp2p::gossipsub::{ - FastMessageId, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, - RawGossipsubMessage, ValidationMode, -}; +use libp2p::gossipsub; use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -83,7 +80,7 @@ pub struct Config { /// Gossipsub configuration parameters. #[serde(skip)] - pub gs_config: GossipsubConfig, + pub gs_config: gossipsub::Config, /// Discv5 configuration parameters. #[serde(skip)] @@ -265,7 +262,7 @@ impl Default for Config { // Note: Using the default config here. Use `gossipsub_config` function for getting // Lighthouse specific configuration for gossipsub. - let gs_config = GossipsubConfigBuilder::default() + let gs_config = gossipsub::ConfigBuilder::default() .build() .expect("valid gossipsub configuration"); @@ -416,16 +413,16 @@ impl From for NetworkLoad { } /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
-pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { +pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gossipsub::Config { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(topic, data) for content addressing - let fast_gossip_message_id = |message: &RawGossipsubMessage| { + let fast_gossip_message_id = |message: &gossipsub::RawMessage| { let data = [message.topic.as_str().as_bytes(), &message.data].concat(); - FastMessageId::from(&Sha256::digest(data)[..8]) + gossipsub::FastMessageId::from(&Sha256::digest(data)[..8]) }; fn prefix( prefix: [u8; 4], - message: &GossipsubMessage, + message: &gossipsub::Message, fork_context: Arc, ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); @@ -451,8 +448,8 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos } let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); - let gossip_message_id = move |message: &GossipsubMessage| { - MessageId::from( + let gossip_message_id = move |message: &gossipsub::Message| { + gossipsub::MessageId::from( &Sha256::digest( prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), )[..20], @@ -461,7 +458,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos let load = NetworkLoad::from(network_load); - GossipsubConfigBuilder::default() + gossipsub::ConfigBuilder::default() .max_transmit_size(gossip_max_size(is_merge_enabled)) .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) @@ -474,7 +471,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation - .validation_mode(ValidationMode::Anonymous) + .validation_mode(gossipsub::ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) .message_id_fn(gossip_message_id) .fast_message_id_fn(fast_gossip_message_id) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index f85c4b3e5cb..ef22f816a77 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -7,7 +7,7 @@ use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; use discv5::enr::EnrKey; -use libp2p::core::identity::Keypair; +use libp2p::identity::Keypair; use slog::{debug, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; @@ -133,7 +133,7 @@ pub fn build_or_load_enr( // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. - let enr_key = CombinedKey::from_libp2p(&local_key)?; + let enr_key = CombinedKey::from_libp2p(local_key)?; let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; use_or_load_enr(&enr_key, &mut local_enr, config, log)?; diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 5ce0c55cacd..753da6292ca 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -1,10 +1,9 @@ //! ENR extension trait to support libp2p integration. 
+ use crate::{Enr, Multiaddr, PeerId}; use discv5::enr::{CombinedKey, CombinedPublicKey}; -use libp2p::{ - core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}, - identity::secp256k1, -}; +use libp2p::core::multiaddr::Protocol; +use libp2p::identity::{ed25519, secp256k1, KeyType, Keypair, PublicKey}; use tiny_keccak::{Hasher, Keccak}; /// Extend ENR for libp2p types. @@ -38,7 +37,8 @@ pub trait CombinedKeyPublicExt { /// Extend ENR CombinedKey for conversion to libp2p keys. pub trait CombinedKeyExt { /// Converts a libp2p key into an ENR combined key. - fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result; + fn from_libp2p(key: Keypair) -> Result; + /// Converts a [`secp256k1::Keypair`] into an ENR [`CombinedKey`]. fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey; } @@ -93,14 +93,14 @@ impl EnrExt for Enr { if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -108,14 +108,14 @@ impl EnrExt for Enr { if let Some(udp6) = self.udp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Udp(udp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } if let Some(tcp6) = self.tcp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Tcp(tcp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -133,7 +133,7 @@ impl EnrExt for Enr { if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -141,7 +141,7 @@ impl EnrExt for Enr { if let Some(tcp6) = self.tcp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Tcp(tcp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -159,7 +159,7 @@ impl EnrExt for Enr { if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -167,7 +167,7 @@ impl EnrExt for Enr { if let Some(udp6) = self.udp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Udp(udp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -204,18 +204,16 @@ impl CombinedKeyPublicExt for CombinedPublicKey { match self { Self::Secp256k1(pk) => { let pk_bytes = pk.to_sec1_bytes(); - let libp2p_pk = libp2p::core::PublicKey::Secp256k1( - libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes) - .expect("valid public key"), - ); + let libp2p_pk: PublicKey = secp256k1::PublicKey::try_from_bytes(&pk_bytes) .expect("valid public key") .into(); PeerId::from_public_key(&libp2p_pk) } Self::Ed25519(pk) => { let pk_bytes = pk.to_bytes(); - let libp2p_pk = libp2p::core::PublicKey::Ed25519( -
libp2p::core::identity::ed25519::PublicKey::decode(&pk_bytes) - .expect("valid public key"), - ); + let libp2p_pk: PublicKey = ed25519::PublicKey::try_from_bytes(&pk_bytes) + .expect("valid public key") + .into(); PeerId::from_public_key(&libp2p_pk) } } @@ -223,18 +221,25 @@ impl CombinedKeyPublicExt for CombinedPublicKey { } impl CombinedKeyExt for CombinedKey { - fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result { - match key { - Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)), - Keypair::Ed25519(key) => { + fn from_libp2p(key: Keypair) -> Result { + match key.key_type() { + KeyType::Secp256k1 => { + let key = key.try_into_secp256k1().expect("right key type"); + let secret = + discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes()) + .expect("libp2p key must be valid"); + Ok(CombinedKey::Secp256k1(secret)) + } + KeyType::Ed25519 => { + let key = key.try_into_ed25519().expect("right key type"); let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes( - &(key.encode()[..32]) + &(key.to_bytes()[..32]) .try_into() .expect("libp2p key must be valid"), ); Ok(CombinedKey::from(ed_keypair)) } - Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), + _ => Err("Unsupported keypair kind"), } } fn from_secp256k1(key: &secp256k1::Keypair) -> Self { @@ -251,37 +256,46 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result { - let uncompressed_key_bytes = &pk.encode_uncompressed()[1..]; + })?; + + match public_key.key_type() { + KeyType::Secp256k1 => { + let pk = public_key + .clone() + .try_into_secp256k1() + .expect("right key type"); + let uncompressed_key_bytes = &pk.to_bytes_uncompressed()[1..]; let mut output = [0_u8; 32]; let mut hasher = Keccak::v256(); hasher.update(uncompressed_key_bytes); hasher.finalize(&mut output); Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) } - PublicKey::Ed25519(pk) => { - let uncompressed_key_bytes = pk.encode(); + KeyType::Ed25519 => { + let pk = public_key + .clone() + .try_into_ed25519() + .expect("right key type"); + let uncompressed_key_bytes = pk.to_bytes(); let mut output = [0_u8; 32]; let mut hasher = Keccak::v256(); hasher.update(&uncompressed_key_bytes); hasher.finalize(&mut output); Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) } - PublicKey::Ecdsa(_) => Err(format!( - "Unsupported public key (Ecdsa) from peer {}", - peer_id - )), + + _ => Err(format!("Unsupported public key from peer {}", peer_id)), } } #[cfg(test)] mod tests { + use super::*; #[test] @@ -290,9 +304,9 @@ mod tests { let sk_bytes = hex::decode(sk_hex).unwrap(); let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap(); - let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap(); - let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into(); - let libp2p_kp = Keypair::Secp256k1(secp256k1_kp); + let libp2p_sk = secp256k1::SecretKey::try_from_bytes(sk_bytes).unwrap(); + let secp256k1_kp: secp256k1::Keypair = libp2p_sk.into(); + let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); let enr = discv5::enr::EnrBuilder::new("v4") @@ -311,9 +325,9 @@ mod tests { &sk_bytes.clone().try_into().unwrap(), ); - let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap(); - let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into(); - let libp2p_kp = Keypair::Ed25519(secp256k1_kp); + let libp2p_sk = 
ed25519::SecretKey::try_from_bytes(sk_bytes).unwrap(); + let ed25519_kp: ed25519::Keypair = libp2p_sk.into(); + let libp2p_kp: Keypair = ed25519_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); let enr = discv5::enr::EnrBuilder::new("v4") diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index d4d0baef6b7..0f8ddc53c1b 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -16,19 +16,20 @@ pub use enr::{ Eth2Enr, }; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; -pub use libp2p::core::identity::{Keypair, PublicKey}; +pub use libp2p::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; -use libp2p::swarm::AddressScore; +use libp2p::swarm::THandlerInEvent; pub use libp2p::{ - core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, + core::{ConnectedPoint, Multiaddr}, + identity::PeerId, swarm::{ - dummy::ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction as NBAction, - NotifyHandler, PollParameters, SubstreamProtocol, + dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, + PollParameters, SubstreamProtocol, ToSwarm, }, }; use lru::LruCache; @@ -191,7 +192,7 @@ pub struct Discovery { impl Discovery { /// NOTE: Creating discovery requires running within a tokio execution environment. pub async fn new( - local_key: &Keypair, + local_key: Keypair, config: &NetworkConfig, network_globals: Arc>, log: &slog::Logger, @@ -925,22 +926,51 @@ impl Discovery { impl NetworkBehaviour for Discovery { // Discovery is not a real NetworkBehaviour... type ConnectionHandler = ConnectionHandler; - type OutEvent = DiscoveredPeers; + type ToSwarm = DiscoveredPeers; - fn new_handler(&mut self) -> Self::ConnectionHandler { - ConnectionHandler + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check discovery's banned IPs here in the future. + Ok(ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + Ok(ConnectionHandler) + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + _event: void::Void, + ) { } - // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - if let Some(enr) = self.enr_of_peer(peer_id) { + fn handle_pending_outbound_connection( + &mut self, + _connection_id: ConnectionId, + maybe_peer: Option, + _addresses: &[Multiaddr], + _effective_role: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + if let Some(enr) = maybe_peer.and_then(|peer_id| self.enr_of_peer(&peer_id)) { // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP // port is removed, which is assumed to be associated with the discv5 protocol (and // therefore irrelevant for other libp2p components). 
- enr.multiaddr_tcp() + Ok(enr.multiaddr_tcp()) } else { - // PeerId is not known - Vec::new() + Ok(vec![]) } } @@ -949,7 +979,7 @@ impl NetworkBehaviour for Discovery { &mut self, cx: &mut Context, _: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll>> { if !self.started { return Poll::Pending; } @@ -960,7 +990,7 @@ impl NetworkBehaviour for Discovery { // Drive the queries and return any results from completed queries if let Some(peers) = self.poll_queries(cx) { // return the result to the peer manager - return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers })); + return Poll::Ready(ToSwarm::GenerateEvent(DiscoveredPeers { peers })); } // Process the server event stream @@ -1034,10 +1064,7 @@ impl NetworkBehaviour for Discovery { if let Some(address) = addr { // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling // should handle this. - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(address)); } } Discv5Event::EnrAdded { .. } @@ -1065,8 +1092,9 @@ impl NetworkBehaviour for Discovery { | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => { + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) + | FromSwarm::ExternalAddrConfirmed(_) => { // Ignore events not relevant to discovery } } @@ -1077,10 +1105,8 @@ impl Discovery { fn on_dial_failure(&mut self, peer_id: Option, error: &DialError) { if let Some(peer_id) = peer_id { match error { - DialError::Banned - | DialError::LocalPeerId - | DialError::InvalidPeerId(_) - | DialError::ConnectionIo(_) + DialError::LocalPeerId { .. } + | DialError::Denied { .. } | DialError::NoAddresses | DialError::Transport(_) | DialError::WrongPeerId { .. } => { @@ -1088,9 +1114,7 @@ impl Discovery { debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); self.disconnect_peer(&peer_id); } - DialError::ConnectionLimit(_) - | DialError::DialPeerConditionFalse(_) - | DialError::Aborted => {} + DialError::DialPeerConditionFalse(_) | DialError::Aborted => {} } } } @@ -1139,8 +1163,8 @@ mod tests { false, &log, ); - let keypair = Keypair::Secp256k1(keypair); - Discovery::new(&keypair, &config, Arc::new(globals), &log) + let keypair = keypair.into(); + Discovery::new(keypair, &config, Arc::new(globals), &log) .await .unwrap() } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index c6c737caed0..4f3454f4033 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -21,7 +21,8 @@ use std::{ use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; -pub use libp2p::core::{identity::Keypair, Multiaddr}; +pub use libp2p::core::Multiaddr; +pub use libp2p::identity::Keypair; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod peerdb; diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 24de83a61da..ce374bb9ab4 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,12 +1,14 @@ +//! Implementation of [`NetworkBehaviour`] for the [`PeerManager`]. 
+ use std::task::{Context, Poll}; use futures::StreamExt; use libp2p::core::ConnectedPoint; +use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::PeerId; +use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; use slog::{debug, error}; use types::EthSpec; @@ -19,20 +21,24 @@ use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = ConnectionHandler; - - type OutEvent = PeerManagerEvent; + type ToSwarm = PeerManagerEvent; /* Required trait members */ - fn new_handler(&mut self) -> Self::ConnectionHandler { - ConnectionHandler + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + _event: libp2p::swarm::THandlerOutEvent, + ) { + // no events from the dummy handler } fn poll( &mut self, cx: &mut Context<'_>, _params: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll> { // perform the heartbeat when necessary while self.heartbeat.poll_tick(cx).is_ready() { self.heartbeat(); @@ -84,19 +90,17 @@ impl NetworkBehaviour for PeerManager { } if !self.events.is_empty() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + return Poll::Ready(ToSwarm::GenerateEvent(self.events.remove(0))); } else { self.events.shrink_to_fit(); } if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() { self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); - let handler = self.new_handler(); - return Poll::Ready(NetworkBehaviourAction::Dial { + return Poll::Ready(ToSwarm::Dial { opts: DialOpts::peer_id(peer_id) .condition(PeerCondition::Disconnected) .build(), - handler, }); } @@ -110,13 +114,31 @@ impl NetworkBehaviour for PeerManager { endpoint, other_established, .. - }) => self.on_connection_established(peer_id, endpoint, other_established), + }) => { + // NOTE: We still need to handle the [`ConnectionEstablished`] because the + // [`NetworkBehaviour::handle_established_inbound_connection`] and + // [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. This + // means another behaviour can kill the connection early, and we can't consider a + // peer connected until this event is received. + self.on_connection_established(peer_id, endpoint, other_established) + } FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, remaining_established, .. }) => self.on_connection_closed(peer_id, remaining_established), - FromSwarm::DialFailure(DialFailure { peer_id, .. 
}) => self.on_dial_failure(peer_id), + FromSwarm::DialFailure(DialFailure { + peer_id, + error, + connection_id: _, + }) => { + debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %error); + self.on_dial_failure(peer_id); + } + FromSwarm::ExternalAddrConfirmed(_) => { + // TODO: we likely want to check this against our assumed external tcp + // address + } FromSwarm::AddressChange(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) @@ -124,13 +146,35 @@ impl NetworkBehaviour for PeerManager { | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => { + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) => { // The rest of the events we ignore since they are handled in their associated // `SwarmEvent` } } } + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &libp2p::Multiaddr, + _remote_addr: &libp2p::Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check if we accept this peer or not in the future. + Ok(ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &libp2p::Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check if we accept this peer or not in the future. + Ok(ConnectionHandler) + } } impl PeerManager { diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 8199bee2a79..d42248ad5f6 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -3,21 +3,21 @@ use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination}; use super::outbound::OutboundRequestContainer; -use super::protocol::{max_rpc_size, InboundRequest, Protocol, RPCError, RPCProtocol}; +use super::protocol::{ + max_rpc_size, InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol, +}; use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; use futures::prelude::*; use futures::{Sink, SinkExt}; -use libp2p::core::upgrade::{ - InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError, -}; use libp2p::swarm::handler::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError, SubstreamProtocol, }; -use libp2p::swarm::NegotiatedSubstream; +use libp2p::swarm::Stream; use slog::{crit, debug, trace, warn}; use smallvec::SmallVec; use std::{ @@ -47,7 +47,7 @@ const MAX_INBOUND_SUBSTREAMS: usize = 32; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct SubstreamId(usize); -type InboundSubstream = InboundFramed; +type InboundSubstream = InboundFramed; /// Events the handler emits to the behaviour. pub type HandlerEvent = Result, HandlerErr>; @@ -195,12 +195,12 @@ pub enum OutboundSubstreamState { /// handler because GOODBYE requests can be handled and responses dropped instantly. RequestPendingResponse { /// The framed negotiated substream. 
- substream: Box>, + substream: Box>, /// Keeps track of the actual request sent. request: OutboundRequest, }, /// Closing an outbound substream> - Closing(Box>), + Closing(Box>), /// Temporary state during processing Poisoned, } @@ -212,7 +212,7 @@ where pub fn new( listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, - log: &slog::Logger, + log: slog::Logger, ) -> Self { RPCHandler { listen_protocol, @@ -230,7 +230,7 @@ where outbound_io_error_retries: 0, fork_context, waker: None, - log: log.clone(), + log, } } @@ -315,8 +315,8 @@ where TSpec: EthSpec, Id: ReqId, { - type InEvent = RPCSend; - type OutEvent = HandlerEvent; + type FromBehaviour = RPCSend; + type ToBehaviour = HandlerEvent; type Error = RPCError; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; @@ -327,121 +327,7 @@ where self.listen_protocol.clone() } - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - request_info: Self::OutboundOpenInfo, - ) { - self.dial_negotiated -= 1; - let (id, request) = request_info; - let proto = request.versioned_protocol().protocol(); - - // accept outbound connections only if the handler is not deactivated - if matches!(self.state, HandlerState::Deactivated) { - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto, - id, - })); - } - - // add the stream to substreams if we expect a response, otherwise drop the stream. - let expected_responses = request.expected_responses(); - if expected_responses > 0 { - // new outbound request. Store the stream and tag the output. - let delay_key = self.outbound_substreams_delay.insert( - self.current_outbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { - substream: Box::new(out), - request, - }; - let expected_responses = if expected_responses > 1 { - // Currently enforced only for multiple responses - Some(expected_responses) - } else { - None - }; - if self - .outbound_substreams - .insert( - self.current_outbound_substream_id, - OutboundInfo { - state: awaiting_stream, - delay_key, - proto, - remaining_chunks: expected_responses, - req_id: id, - }, - ) - .is_some() - { - crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id); - } - self.current_outbound_substream_id.0 += 1; - } - } - - fn inject_fully_negotiated_inbound( - &mut self, - substream: >::Output, - _info: Self::InboundOpenInfo, - ) { - // only accept new peer requests when active - if !matches!(self.state, HandlerState::Active) { - return; - } - - let (req, substream) = substream; - let expected_responses = req.expected_responses(); - - // store requests that expect responses - if expected_responses > 0 { - if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { - // Store the stream and tag the output. 
- let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = InboundState::Idle(substream); - self.inbound_substreams.insert( - self.current_inbound_substream_id, - InboundInfo { - state: awaiting_stream, - pending_items: VecDeque::with_capacity(std::cmp::min( - expected_responses, - 128, - ) as usize), - delay_key: Some(delay_key), - protocol: req.versioned_protocol().protocol(), - request_start_time: Instant::now(), - remaining_chunks: expected_responses, - }, - ); - } else { - self.events_out.push(Err(HandlerErr::Inbound { - id: self.current_inbound_substream_id, - proto: req.versioned_protocol().protocol(), - error: RPCError::HandlerRejected, - })); - return self.shutdown(None); - } - } - - // If we received a goodbye, shutdown the connection. - if let InboundRequest::Goodbye(_) = req { - self.shutdown(None); - } - - self.events_out.push(Ok(RPCReceived::Request( - self.current_inbound_substream_id, - req, - ))); - self.current_inbound_substream_id.0 += 1; - } - - fn inject_event(&mut self, rpc_event: Self::InEvent) { + fn on_behaviour_event(&mut self, rpc_event: Self::FromBehaviour) { match rpc_event { RPCSend::Request(id, req) => self.send_request(id, req), RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response), @@ -453,56 +339,6 @@ where } } - fn inject_dial_upgrade_error( - &mut self, - request_info: Self::OutboundOpenInfo, - error: ConnectionHandlerUpgrErr< - >::Error, - >, - ) { - let (id, req) = request_info; - if let ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error - { - self.outbound_io_error_retries += 1; - if self.outbound_io_error_retries < IO_ERROR_RETRIES { - self.send_request(id, req); - return; - } - } - - // This dialing is now considered failed - self.dial_negotiated -= 1; - - self.outbound_io_error_retries = 0; - // map the error - let error = match error { - ConnectionHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"), - ConnectionHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout, - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e, - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { - RPCError::UnsupportedProtocol - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::ProtocolError(e), - )) => match e { - ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()), - ProtocolError::InvalidProtocol => { - RPCError::InternalError("Protocol was deemed invalid") - } - ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => { - // Peer is sending invalid data during the negotiation phase, not - // participating in the protocol - RPCError::InvalidData("Invalid message during negotiation".to_string()) - } - }, - }; - self.events_out.push(Err(HandlerErr::Outbound { - error, - proto: req.versioned_protocol().protocol(), - id, - })); - } - fn connection_keep_alive(&self) -> KeepAlive { // Check that we don't have outbound items pending for dialing, nor dialing, nor // established. Also check that there are no established inbound substreams. 
@@ -535,7 +371,7 @@ where ConnectionHandlerEvent< Self::OutboundProtocol, Self::OutboundOpenInfo, - Self::OutEvent, + Self::ToBehaviour, Self::Error, >, > { @@ -548,7 +384,9 @@ where } // return any events that need to be reported if !self.events_out.is_empty() { - return Poll::Ready(ConnectionHandlerEvent::Custom(self.events_out.remove(0))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + self.events_out.remove(0), + )); } else { self.events_out.shrink_to_fit(); } @@ -612,7 +450,9 @@ where error: RPCError::StreamTimeout, }; // notify the user - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( + outbound_err, + ))); } else { crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref()); } @@ -872,7 +712,7 @@ where }), }; - return Poll::Ready(ConnectionHandlerEvent::Custom(received)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(received)); } Poll::Ready(None) => { // stream closed @@ -887,7 +727,7 @@ where // notify the application error if request.expected_responses() > 1 { // return an end of stream result - return Poll::Ready(ConnectionHandlerEvent::Custom(Ok( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( RPCReceived::EndOfStream(request_id, request.stream_termination()), ))); } @@ -898,7 +738,9 @@ where proto: request.versioned_protocol().protocol(), error: RPCError::IncompleteStream, }; - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( + outbound_err, + ))); } Poll::Pending => { entry.get_mut().state = @@ -914,7 +756,9 @@ where error: e, }; entry.remove_entry(); - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( + outbound_err, + ))); } }, OutboundSubstreamState::Closing(mut substream) => { @@ -940,7 +784,7 @@ where }; if let Some(termination) = termination { - return Poll::Ready(ConnectionHandlerEvent::Custom(Ok( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( RPCReceived::EndOfStream(request_id, termination), ))); } @@ -989,6 +833,207 @@ where Poll::Pending } + + fn on_connection_event( + &mut self, + event: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + match event { + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol, + info: _, + }) => self.on_fully_negotiated_inbound(protocol), + ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { + protocol, + info, + }) => self.on_fully_negotiated_outbound(protocol, info), + ConnectionEvent::DialUpgradeError(DialUpgradeError { info, error }) => { + self.on_dial_upgrade_error(info, error) + } + ConnectionEvent::ListenUpgradeError(libp2p::swarm::handler::ListenUpgradeError { + info: _, + error: _, /* RPCError */ + }) => { + // This is going to be removed in the next libp2p release. I think it's fine to do + // nothing. + } + ConnectionEvent::LocalProtocolsChange(_) => { + // This shouldn't affect this handler; we will still negotiate streams if we support + // the protocol as usual. + } + ConnectionEvent::RemoteProtocolsChange(_) => { + // This shouldn't affect this handler; we will still negotiate streams if we support + // the protocol as usual.
+ } + ConnectionEvent::AddressChange(_) => { + // We don't care about these changes as they have no bearing on our RPC internal + // logic. + } + } + } +} + +impl RPCHandler +where + Id: ReqId, + TSpec: EthSpec, +{ + fn on_fully_negotiated_inbound(&mut self, substream: InboundOutput) { + // only accept new peer requests when active + if !matches!(self.state, HandlerState::Active) { + return; + } + + let (req, substream) = substream; + let expected_responses = req.expected_responses(); + + // store requests that expect responses + if expected_responses > 0 { + if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { + // Store the stream and tag the output. + let delay_key = self.inbound_substreams_delay.insert( + self.current_inbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = InboundState::Idle(substream); + self.inbound_substreams.insert( + self.current_inbound_substream_id, + InboundInfo { + state: awaiting_stream, + pending_items: VecDeque::with_capacity(std::cmp::min( + expected_responses, + 128, + ) as usize), + delay_key: Some(delay_key), + protocol: req.versioned_protocol().protocol(), + request_start_time: Instant::now(), + remaining_chunks: expected_responses, + }, + ); + } else { + self.events_out.push(Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: req.versioned_protocol().protocol(), + error: RPCError::HandlerRejected, + })); + return self.shutdown(None); + } + } + + // If we received a goodbye, shutdown the connection. + if let InboundRequest::Goodbye(_) = req { + self.shutdown(None); + } + + self.events_out.push(Ok(RPCReceived::Request( + self.current_inbound_substream_id, + req, + ))); + self.current_inbound_substream_id.0 += 1; + } + + fn on_fully_negotiated_outbound( + &mut self, + substream: OutboundFramed, + (id, request): (Id, OutboundRequest), + ) { + self.dial_negotiated -= 1; + // Reset any io-retries counter. + self.outbound_io_error_retries = 0; + + let proto = request.versioned_protocol().protocol(); + + // accept outbound connections only if the handler is not deactivated + if matches!(self.state, HandlerState::Deactivated) { + self.events_out.push(Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto, + id, + })); + } + + // add the stream to substreams if we expect a response, otherwise drop the stream. + let expected_responses = request.expected_responses(); + if expected_responses > 0 { + // new outbound request. Store the stream and tag the output.
+ let delay_key = self.outbound_substreams_delay.insert( + self.current_outbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { + substream: Box::new(substream), + request, + }; + let expected_responses = if expected_responses > 1 { + // Currently enforced only for multiple responses + Some(expected_responses) + } else { + None + }; + if self + .outbound_substreams + .insert( + self.current_outbound_substream_id, + OutboundInfo { + state: awaiting_stream, + delay_key, + proto, + remaining_chunks: expected_responses, + req_id: id, + }, + ) + .is_some() + { + crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id); + } + self.current_outbound_substream_id.0 += 1; + } + } + fn on_dial_upgrade_error( + &mut self, + request_info: (Id, OutboundRequest), + error: StreamUpgradeError, + ) { + let (id, req) = request_info; + + // map the error + let error = match error { + StreamUpgradeError::Timeout => RPCError::NegotiationTimeout, + StreamUpgradeError::Apply(RPCError::IoError(e)) => { + self.outbound_io_error_retries += 1; + if self.outbound_io_error_retries < IO_ERROR_RETRIES { + self.send_request(id, req); + return; + } + RPCError::IoError(e) + } + StreamUpgradeError::NegotiationFailed => RPCError::UnsupportedProtocol, + StreamUpgradeError::Io(io_err) => { + self.outbound_io_error_retries += 1; + if self.outbound_io_error_retries < IO_ERROR_RETRIES { + self.send_request(id, req); + return; + } + RPCError::IoError(io_err.to_string()) + } + StreamUpgradeError::Apply(other) => other, + }; + + // This dialing is now considered failed + self.dial_negotiated -= 1; + + self.outbound_io_error_retries = 0; + self.events_out.push(Err(HandlerErr::Outbound { + error, + proto: req.versioned_protocol().protocol(), + id, + })); + } } impl slog::Value for SubstreamId { diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index ffdc193bbbd..4fd9b516d4c 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -6,11 +6,11 @@ use futures::future::FutureExt; use handler::{HandlerEvent, RPCHandler}; -use libp2p::core::connection::ConnectionId; use libp2p::swarm::{ - handler::ConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, - PollParameters, SubstreamProtocol, + handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, + ToSwarm, }; +use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o}; @@ -21,7 +21,7 @@ use types::{EthSpec, ForkContext}; pub(crate) use handler::HandlerErr; pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; -pub(crate) use protocol::{InboundRequest, RPCProtocol}; +pub(crate) use protocol::InboundRequest; pub use handler::SubstreamId; pub use methods::{ @@ -32,6 +32,7 @@ pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; +use self::protocol::RPCProtocol; use self::self_limiter::SelfRateLimiter; pub(crate) mod codec; @@ -104,8 +105,7 @@ pub struct RPCMessage { pub event: HandlerEvent, } -type BehaviourAction = - NetworkBehaviourAction, RPCHandler>; +type BehaviourAction = ToSwarm, RPCSend>; /// Implements the libp2p 
`NetworkBehaviour` trait and therefore manages network-level /// logic. @@ -161,7 +161,7 @@ impl RPC { id: (ConnectionId, SubstreamId), event: RPCCodedResponse, ) { - self.events.push(NetworkBehaviourAction::NotifyHandler { + self.events.push(ToSwarm::NotifyHandler { peer_id, handler: NotifyHandler::One(id.0), event: RPCSend::Response(id.1, event), @@ -181,7 +181,7 @@ impl RPC { } } } else { - NetworkBehaviourAction::NotifyHandler { + ToSwarm::NotifyHandler { peer_id, handler: NotifyHandler::Any, event: RPCSend::Request(request_id, req), @@ -194,7 +194,7 @@ impl RPC { /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This /// gracefully terminates the RPC behaviour with a goodbye message. pub fn shutdown(&mut self, peer_id: PeerId, id: Id, reason: GoodbyeReason) { - self.events.push(NetworkBehaviourAction::NotifyHandler { + self.events.push(ToSwarm::NotifyHandler { peer_id, handler: NotifyHandler::Any, event: RPCSend::Shutdown(id, reason), @@ -208,29 +208,83 @@ where Id: ReqId, { type ConnectionHandler = RPCHandler; - type OutEvent = RPCMessage; - - fn new_handler(&mut self) -> Self::ConnectionHandler { - RPCHandler::new( - SubstreamProtocol::new( - RPCProtocol { - fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), - enable_light_client_server: self.enable_light_client_server, - phantom: PhantomData, - }, - (), - ), - self.fork_context.clone(), - &self.log, - ) + type ToSwarm = RPCMessage; + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + peer_id: PeerId, + _local_addr: &libp2p::Multiaddr, + _remote_addr: &libp2p::Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + let protocol = SubstreamProtocol::new( + RPCProtocol { + fork_context: self.fork_context.clone(), + max_rpc_size: max_rpc_size(&self.fork_context), + enable_light_client_server: self.enable_light_client_server, + phantom: PhantomData, + }, + (), + ); + // NOTE: this is needed because PeerIds have interior mutability. + let peer_repr = peer_id.to_string(); + let log = self.log.new(slog::o!("peer_id" => peer_repr)); + let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + + Ok(handler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + peer_id: PeerId, + _addr: &libp2p::Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + let protocol = SubstreamProtocol::new( + RPCProtocol { + fork_context: self.fork_context.clone(), + max_rpc_size: max_rpc_size(&self.fork_context), + enable_light_client_server: self.enable_light_client_server, + phantom: PhantomData, + }, + (), + ); + + // NOTE: this is needed because PeerIds have interior mutability. 
+ let peer_repr = peer_id.to_string(); + let log = self.log.new(slog::o!("peer_id" => peer_repr)); + let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + + Ok(handler) + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::ConnectionClosed(_) + | FromSwarm::ConnectionEstablished(_) + | FromSwarm::AddressChange(_) + | FromSwarm::DialFailure(_) + | FromSwarm::ListenFailure(_) + | FromSwarm::NewListener(_) + | FromSwarm::NewListenAddr(_) + | FromSwarm::ExpiredListenAddr(_) + | FromSwarm::ListenerError(_) + | FromSwarm::ListenerClosed(_) + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) + | FromSwarm::ExternalAddrConfirmed(_) => { + // Rpc Behaviour does not act on these swarm events. We use a comprehensive match + // statement to ensure future events are dealt with appropriately. + } + } } - fn inject_event( + fn on_connection_handler_event( &mut self, peer_id: PeerId, conn_id: ConnectionId, - event: ::OutEvent, + event: ::ToBehaviour, ) { if let Ok(RPCReceived::Request(ref id, ref req)) = event { if let Some(limiter) = self.limiter.as_mut() { @@ -238,12 +292,11 @@ where match limiter.allows(&peer_id, req) { Ok(()) => { // send the event to the user - self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })) + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols @@ -281,20 +334,18 @@ where } } else { // No rate limiting, send the event to the user - self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })) - } - } else { - self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { + self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, event, - })); + })) + } + } else { + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })); } } @@ -302,7 +353,7 @@ where &mut self, cx: &mut Context, _: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll>> { // let the rate limiter prune. if let Some(limiter) = self.limiter.as_mut() { let _ = limiter.poll_unpin(cx); diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index ea39c1423a3..22f9f19d680 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -7,7 +7,7 @@ use crate::rpc::{ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; -use libp2p::core::{InboundUpgrade, ProtocolName, UpgradeInfo}; +use libp2p::core::{InboundUpgrade, UpgradeInfo}; use ssz::Encode; use ssz_types::VariableList; use std::io; @@ -313,6 +313,12 @@ pub struct ProtocolId { protocol_id: String, } +impl AsRef for ProtocolId { + fn as_ref(&self) -> &str { + self.protocol_id.as_ref() + } +} + impl ProtocolId { /// Returns min and max size for messages of given protocol id requests. 
pub fn rpc_request_limits(&self) -> RpcLimits { @@ -407,12 +413,6 @@ impl ProtocolId { } } -impl ProtocolName for ProtocolId { - fn protocol_name(&self) -> &[u8] { - self.protocol_id.as_bytes() - } -} - /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 626917d6a7f..4348c1ec6d5 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -64,7 +64,7 @@ impl SelfRateLimiter { } /// Checks if the rate limiter allows the request. If it's allowed, returns the - /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed + /// [`ToSwarm`] that should be emitted. When not allowed, the request is delayed /// until it can be sent. pub fn allows( &mut self, @@ -95,7 +95,7 @@ impl SelfRateLimiter { } /// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows the - /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request + /// request, the [`ToSwarm`] that should be emitted is returned. If the request /// should be delayed, it's returned with the duration to wait. fn try_send_request( limiter: &mut RateLimiter, diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 5ab89fee51b..187c0ab1b1d 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use libp2p::core::connection::ConnectionId; +use libp2p::swarm::ConnectionId; use types::light_client_bootstrap::LightClientBootstrap; use types::{EthSpec, SignedBeaconBlock}; diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 7d20b87ad1c..6c52a07c14a 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -3,21 +3,27 @@ use crate::peer_manager::PeerManager; use crate::rpc::{ReqId, RPC}; use crate::types::SnappyTransform; -use libp2p::gossipsub::subscription_filter::{ - MaxCountSubscriptionFilter, WhitelistSubscriptionFilter, -}; -use libp2p::gossipsub::Gossipsub as BaseGossipsub; -use libp2p::identify::Behaviour as Identify; +use libp2p::gossipsub; +use libp2p::identify; use libp2p::swarm::NetworkBehaviour; use types::EthSpec; use super::api_types::RequestId; -pub type SubscriptionFilter = MaxCountSubscriptionFilter; -pub type Gossipsub = BaseGossipsub; +pub type SubscriptionFilter = + gossipsub::MaxCountSubscriptionFilter; +pub type Gossipsub = gossipsub::Behaviour; #[derive(NetworkBehaviour)] -pub(crate) struct Behaviour { +pub(crate) struct Behaviour +where + AppReqId: ReqId, + TSpec: EthSpec, +{ + /// Peers banned. + pub banned_peers: libp2p::allow_block_list::Behaviour, + /// Keep track of active and pending connections to enforce hard limits. + pub connection_limits: libp2p::connection_limits::Behaviour, /// The routing pub-sub mechanism for eth2. pub gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. @@ -27,7 +33,7 @@ pub(crate) struct Behaviour { /// Keep regular connection to peers and disconnect if absent. // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. 
/// Provides IP addresses and peer information. - pub identify: Identify, + pub identify: identify::Behaviour, /// The peer manager that keeps track of peer's reputation and status. pub peer_manager: PeerManager, } diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 88becd686e5..b058fc0ff13 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -1,7 +1,8 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::{error, TopicHash}; use libp2p::gossipsub::{ - GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, + Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, + TopicScoreParams, }; use std::cmp::max; use std::collections::HashMap; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 129a4da25bb..1a25beee0a7 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -24,15 +24,12 @@ use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::bandwidth::BandwidthSinks; -use libp2p::gossipsub::error::PublishError; -use libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig; -use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; use libp2p::gossipsub::{ - GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, + self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, }; -use libp2p::identify::{Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent}; +use libp2p::identify; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; -use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; use std::path::PathBuf; @@ -66,10 +63,6 @@ pub enum NetworkEvent { PeerConnectedIncoming(PeerId), /// A peer has disconnected. PeerDisconnected(PeerId), - /// The peer needs to be banned. - PeerBanned(PeerId), - /// The peer has been unbanned. - PeerUnbanned(PeerId), /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. 
@@ -229,7 +222,7 @@ impl Network { let update_gossipsub_scores = tokio::time::interval(params.decay_interval); let possible_fork_digests = ctx.fork_context.all_fork_digests(); - let filter = MaxCountSubscriptionFilter { + let filter = gossipsub::MaxCountSubscriptionFilter { filter: utils::create_whitelist_filter( possible_fork_digests, ctx.chain_spec.attestation_subnet_count, @@ -244,7 +237,7 @@ impl Network { // If metrics are enabled for gossipsub build the configuration let gossipsub_metrics = ctx .gossipsub_registry - .map(|registry| (registry, GossipsubMetricsConfig::default())); + .map(|registry| (registry, Default::default())); let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( @@ -273,26 +266,32 @@ impl Network { let discovery = { // Build and start the discovery sub-behaviour - let mut discovery = - Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?; + let mut discovery = Discovery::new( + local_keypair.clone(), + &config, + network_globals.clone(), + &log, + ) + .await?; // start searching for peers discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); discovery }; let identify = { + let local_public_key = local_keypair.public(); let identify_config = if config.private { - IdentifyConfig::new( + identify::Config::new( "".into(), - local_keypair.public(), // Still send legitimate public key + local_public_key, // Still send legitimate public key ) .with_cache_size(0) } else { - IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public()) + identify::Config::new("eth2/1.0.0".into(), local_public_key) .with_agent_version(lighthouse_version::version_with_platform()) .with_cache_size(0) }; - Identify::new(identify_config) + identify::Behaviour::new(identify_config) }; let peer_manager = { @@ -305,13 +304,38 @@ impl Network { PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)? 
}; + let connection_limits = { + let limits = libp2p::connection_limits::ConnectionLimits::default() + .with_max_pending_incoming(Some(5)) + .with_max_pending_outgoing(Some(16)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + .ceil() as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, + )) + .with_max_established(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) + .ceil() as u32, + )) + .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + + libp2p::connection_limits::Behaviour::new(limits) + }; + + let banned_peers = libp2p::allow_block_list::Behaviour::default(); + let behaviour = { Behaviour { + banned_peers, gossipsub, eth2_rpc, discovery, identify, peer_manager, + connection_limits, } }; @@ -329,22 +353,6 @@ impl Network { } // sets up the libp2p connection limits - let limits = ConnectionLimits::default() - .with_max_pending_incoming(Some(5)) - .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some( - (config.target_peers as f32 - * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) - .ceil() as u32, - )) - .with_max_established_outgoing(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, - )) - .with_max_established(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) - .ceil() as u32, - )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); ( SwarmBuilder::with_executor( @@ -354,8 +362,7 @@ impl Network { Executor(executor), ) .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .connection_event_buffer_size(64) - .connection_limits(limits) + .per_connection_event_buffer_size(4) .build(), bandwidth, ) @@ -396,7 +403,7 @@ impl Network { match self.swarm.listen_on(listen_multiaddr.clone()) { Ok(_) => { let mut log_address = listen_multiaddr; - log_address.push(MProtocol::P2p(enr.peer_id().into())); + log_address.push(MProtocol::P2p(enr.peer_id())); info!(self.log, "Listening established"; "address" => %log_address); } Err(err) => { @@ -493,7 +500,7 @@ impl Network { &mut self.swarm.behaviour_mut().discovery } /// Provides IP addresses and peer information. - pub fn identify_mut(&mut self) -> &mut Identify { + pub fn identify_mut(&mut self) -> &mut identify::Behaviour { &mut self.swarm.behaviour_mut().identify } /// The peer manager that keeps track of peer's reputation and status. @@ -514,7 +521,7 @@ impl Network { &self.swarm.behaviour().discovery } /// Provides IP addresses and peer information. - pub fn identify(&self) -> &Identify { + pub fn identify(&self) -> &identify::Behaviour { &self.swarm.behaviour().identify } /// The peer manager that keeps track of peer's reputation and status. @@ -1045,9 +1052,12 @@ impl Network { /* Sub-behaviour event handling functions */ /// Handle a gossipsub event. 
- fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option> { + fn inject_gs_event( + &mut self, + event: gossipsub::Event, + ) -> Option> { match event { - GossipsubEvent::Message { + gossipsub::Event::Message { propagation_source, message_id: id, message: gs_msg, @@ -1077,7 +1087,7 @@ impl Network { } } } - GossipsubEvent::Subscribed { peer_id, topic } => { + gossipsub::Event::Subscribed { peer_id, topic } => { if let Ok(topic) = GossipTopic::decode(topic.as_str()) { if let Some(subnet_id) = topic.subnet_id() { self.network_globals @@ -1118,7 +1128,7 @@ impl Network { } } } - GossipsubEvent::Unsubscribed { peer_id, topic } => { + gossipsub::Event::Unsubscribed { peer_id, topic } => { if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.network_globals .peers @@ -1126,7 +1136,7 @@ impl Network { .remove_subscription(&peer_id, &subnet_id); } } - GossipsubEvent::GossipsubNotSupported { peer_id } => { + gossipsub::Event::GossipsubNotSupported { peer_id } => { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); self.peer_manager_mut().report_peer( &peer_id, @@ -1340,10 +1350,10 @@ impl Network { /// Handle an identify event. fn inject_identify_event( &mut self, - event: IdentifyEvent, + event: identify::Event, ) -> Option> { match event { - IdentifyEvent::Received { peer_id, mut info } => { + identify::Event::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( self.log, @@ -1354,9 +1364,9 @@ impl Network { // send peer info to the peer manager. self.peer_manager_mut().identify(&peer_id, &info); } - IdentifyEvent::Sent { .. } => {} - IdentifyEvent::Error { .. } => {} - IdentifyEvent::Pushed { .. } => {} + identify::Event::Sent { .. } => {} + identify::Event::Error { .. } => {} + identify::Event::Pushed { .. } => {} } None } @@ -1377,14 +1387,17 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.swarm.ban_peer_id(peer_id); + self.swarm.behaviour_mut().banned_peers.block_peer(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); - Some(NetworkEvent::PeerBanned(peer_id)) + None } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.swarm.unban_peer_id(peer_id); + self.swarm + .behaviour_mut() + .banned_peers + .unblock_peer(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); - Some(NetworkEvent::PeerUnbanned(peer_id)) + None } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform @@ -1431,17 +1444,20 @@ impl Network { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { // Handle sub-behaviour events. + BehaviourEvent::BannedPeers(void) => void::unreachable(void), BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. } => None, SwarmEvent::ConnectionClosed { .. 
} => None, SwarmEvent::IncomingConnection { local_addr, send_back_addr, + connection_id: _, } => { trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); None @@ -1450,19 +1466,41 @@ impl Network { local_addr, send_back_addr, error, + connection_id: _, } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - None - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + let error_repr = match error { + libp2p::swarm::ListenError::Aborted => { + "Incoming connection aborted".to_string() + } + libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => { + format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}") + } + libp2p::swarm::ListenError::LocalPeerId { endpoint } => { + format!("Dialing local peer id {endpoint:?}") + } + libp2p::swarm::ListenError::Denied { cause } => { + format!("Connection was denied with cause {cause}") + } + libp2p::swarm::ListenError::Transport(t) => match t { + libp2p::TransportError::MultiaddrNotSupported(m) => { + format!("Transport error: Multiaddr not supported: {m}") + } + libp2p::TransportError::Other(e) => { + format!("Transport error: other: {e}") + } + }, + }; + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); None } - SwarmEvent::BannedPeer { - peer_id, - endpoint: _, + SwarmEvent::OutgoingConnectionError { + peer_id: _, + error: _, + connection_id: _, } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + // The Behaviour event is more general than the swarm event here. It includes + // connection failures. So we use that log for now, in the peer manager + // behaviour implementation. None } SwarmEvent::NewListenAddr { address, .. 
} => { @@ -1491,7 +1529,13 @@ impl Network { None } } - SwarmEvent::Dialing(_) => None, + SwarmEvent::Dialing { + peer_id, + connection_id: _, + } => { + debug!(self.log, "Swarm Dialing"; "peer_id" => ?peer_id); + None + } }; if let Some(ev) = maybe_event { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index ac0dc57d7ba..21fd09b6b0f 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -4,13 +4,11 @@ use crate::types::{ error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, }; use crate::{GossipTopic, NetworkConfig}; -use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks}; -use libp2p::core::{ - identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, -}; -use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter; -use libp2p::gossipsub::IdentTopic as Topic; -use libp2p::{core, noise, PeerId, Transport}; +use libp2p::bandwidth::BandwidthSinks; +use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; +use libp2p::gossipsub; +use libp2p::identity::{secp256k1, Keypair}; +use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt}; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; @@ -52,30 +50,19 @@ pub fn build_transport( transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) }; - let (transport, bandwidth) = BandwidthLogging::new(transport); - - // mplex config - let mut mplex_config = libp2p::mplex::MplexConfig::new(); - mplex_config.set_max_buffer_size(256); - mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); - // yamux config - let mut yamux_config = libp2p::yamux::YamuxConfig::default(); - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + let mut yamux_config = yamux::Config::default(); + yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); + let (transport, bandwidth) = transport + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(yamux_config) + .timeout(Duration::from_secs(10)) + .boxed() + .with_bandwidth_logging(); // Authentication - Ok(( - transport - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux_config, - mplex_config, - )) - .timeout(Duration::from_secs(10)) - .boxed(), - bandwidth, - )) + Ok((transport, bandwidth)) } // Useful helper functions for debugging. Currently not used in the client. 
@@ -94,10 +81,10 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result { #[allow(dead_code)] fn keypair_from_bytes(mut bytes: Vec) -> error::Result { - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + secp256k1::SecretKey::try_from_bytes(&mut bytes) .map(|secret| { - let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); - Keypair::Secp256k1(keypair) + let keypair: secp256k1::Keypair = secret.into(); + keypair.into() }) .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) } @@ -115,12 +102,10 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { Err(_) => debug!(log, "Could not read network key file"), Ok(_) => { // only accept secp256k1 keys for now - if let Ok(secret_key) = - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) - { - let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); + if let Ok(secret_key) = secp256k1::SecretKey::try_from_bytes(&mut key_bytes) { + let kp: secp256k1::Keypair = secret_key.into(); debug!(log, "Loaded network key from disk."); - return Keypair::Secp256k1(kp); + return kp.into(); } else { debug!(log, "Network key file is not a valid secp256k1 key"); } @@ -129,34 +114,27 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { } // if a key could not be loaded from disk, generate a new one and save it - let local_private_key = Keypair::generate_secp256k1(); - if let Keypair::Secp256k1(key) = local_private_key.clone() { - let _ = std::fs::create_dir_all(&config.network_dir); - match File::create(network_key_f.clone()) - .and_then(|mut f| f.write_all(&key.secret().to_bytes())) - { - Ok(_) => { - debug!(log, "New network key generated and written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write node key to file: {:?}. error: {}", network_key_f, e - ); - } + let local_private_key = secp256k1::Keypair::generate(); + let _ = std::fs::create_dir_all(&config.network_dir); + match File::create(network_key_f.clone()) + .and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes())) + { + Ok(_) => { + debug!(log, "New network key generated and written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write node key to file: {:?}. error: {}", network_key_f, e + ); } } - local_private_key + local_private_key.into() } /// Generate authenticated XX Noise config from identity keys -fn generate_noise_config( - identity_keypair: &Keypair, -) -> noise::NoiseAuthenticated { - let static_dh_keys = noise::Keypair::::new() - .into_authentic(identity_keypair) - .expect("signing can fail only once during starting a node"); - noise::NoiseConfig::xx(static_dh_keys).into_authenticated() +fn generate_noise_config(identity_keypair: &Keypair) -> noise::Config { + noise::Config::new(identity_keypair).expect("signing can fail only once during starting a node") } /// For a multiaddr that ends with a peer id, this strips this suffix. 
Rust-libp2p @@ -236,11 +214,11 @@ pub(crate) fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, attestation_subnet_count: u64, sync_committee_subnet_count: u64, -) -> WhitelistSubscriptionFilter { +) -> gossipsub::WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { let mut add = |kind| { - let topic: Topic = + let topic: gossipsub::IdentTopic = GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); possible_hashes.insert(topic.hash()); }; @@ -262,7 +240,7 @@ pub(crate) fn create_whitelist_filter( add(SyncCommitteeMessage(SyncSubnetId::new(id))); } } - WhitelistSubscriptionFilter(possible_hashes) + gossipsub::WhitelistSubscriptionFilter(possible_hashes) } /// Persist metadata to disk diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index bb0397de1e2..06732ac99fd 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -2,7 +2,7 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; -use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage}; +use libp2p::gossipsub; use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; @@ -56,12 +56,12 @@ impl SnappyTransform { } } -impl DataTransform for SnappyTransform { +impl gossipsub::DataTransform for SnappyTransform { // Provides the snappy decompression from RawGossipsubMessages fn inbound_transform( &self, - raw_message: RawGossipsubMessage, - ) -> Result { + raw_message: gossipsub::RawMessage, + ) -> Result { // check the length of the raw bytes let len = decompress_len(&raw_message.data)?; if len > self.max_size_per_message { @@ -75,7 +75,7 @@ impl DataTransform for SnappyTransform { let decompressed_data = decoder.decompress_vec(&raw_message.data)?; // Build the GossipsubMessage struct - Ok(GossipsubMessage { + Ok(gossipsub::Message { source: raw_message.source, data: decompressed_data, sequence_number: raw_message.sequence_number, diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 64714cbc0a8..b48891335cc 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -1,5 +1,5 @@ #![cfg(test)] -use libp2p::gossipsub::GossipsubConfigBuilder; +use libp2p::gossipsub; use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; use lighthouse_network::EnrExt; @@ -81,7 +81,7 @@ pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); // Reduce gossipsub heartbeat parameters - config.gs_config = GossipsubConfigBuilder::from(config.gs_config) + config.gs_config = gossipsub::ConfigBuilder::from(config.gs_config) .heartbeat_initial_delay(Duration::from_millis(500)) .heartbeat_interval(Duration::from_millis(500)) .build() diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index b517d57df3b..c355c671e80 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -493,10 +493,8 @@ impl NetworkService { NetworkEvent::PeerConnectedOutgoing(peer_id) => { self.send_to_router(RouterMessage::StatusPeer(peer_id)); } - NetworkEvent::PeerConnectedIncoming(_) - | NetworkEvent::PeerBanned(_) - | NetworkEvent::PeerUnbanned(_) => { - // No action 
required for these events. + NetworkEvent::PeerConnectedIncoming(_) => { + // No action required for this event. } NetworkEvent::PeerDisconnected(peer_id) => { self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 1504b7ff0fe..58e6917eca9 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -28,7 +28,7 @@ operating system. Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: @@ -51,10 +51,6 @@ After this, you are ready to [build Lighthouse](#build-lighthouse). brew install cmake ``` -1. Install protoc using Homebrew: -``` -brew install protobuf -``` [Homebrew]: https://brew.sh/ @@ -71,7 +67,7 @@ After this, you are ready to [build Lighthouse](#build-lighthouse). Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) ``` > - To verify that Chocolatey is ready, run `choco` and it should return the version. -1. Install Make, CMake, LLVM and protoc using Chocolatey: +1. Install Make, CMake and LLVM using Chocolatey: ``` choco install make @@ -85,10 +81,6 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' choco install llvm ``` -``` -choco install protoc -``` - These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. @@ -217,4 +209,3 @@ look into [cross compilation](./cross-compiling.md), or use a [pre-built binary](https://github.com/sigp/lighthouse/releases). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. - diff --git a/book/src/pi.md b/book/src/pi.md index d8d154d765a..550415240b4 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -22,7 +22,7 @@ terminal and an Internet connection are necessary. Install the Ubuntu dependencies: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: diff --git a/book/src/setup.md b/book/src/setup.md index ea3c5664ac6..533e1d463d3 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -14,8 +14,6 @@ The additional requirements for developers are: don't have `anvil` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. -- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for - the networking stack. - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, used by web3signer_tests. - [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). 
Also known as diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index c4e36022a82..d006156bf9d 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -80,7 +80,7 @@ impl BootNodeConfig { } let private_key = load_private_key(&network_config, &logger); - let local_key = CombinedKey::from_libp2p(&private_key)?; + let local_key = CombinedKey::from_libp2p(private_key)?; let local_enr = if let Some(dir) = matches.value_of("network-dir") { let network_dir: PathBuf = dir.into(); diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 296d43b1a2e..338a2d243bc 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -18,4 +18,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} ethereum_ssz = "0.5.0" eth2_config = { path = "../eth2_config"} -discv5 = "0.3.0" \ No newline at end of file +discv5 = "0.3.1" \ No newline at end of file diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 98f33f21536..a50aa17027b 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -2,7 +2,7 @@ # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE @@ -10,4 +10,4 @@ RUN cd lighthouse && make install-lcli FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli +COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli \ No newline at end of file diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile deleted file mode 100644 index 5472b980bad..00000000000 --- a/scripts/cross/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -ARG CROSS_BASE_IMAGE -FROM $CROSS_BASE_IMAGE - -RUN apt-get update -y && apt-get upgrade -y - -RUN apt-get install -y unzip && \ - PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ - curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip -o protoc.zip && \ - unzip protoc.zip -d /usr && \ - chmod +x /usr/bin/protoc - -RUN apt-get install -y cmake clang-3.9 - -ENV PROTOC=/usr/bin/protoc diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index ddc49e13cd7..c790e248dfe 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,5 +1,5 @@ FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse # Build lighthouse directly with a cargo build command, bypassing the Makefile.
@@ -22,4 +22,4 @@ COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /u ENV LD_LIBRARY_PATH=/usr/lib # move the lighthouse binary and lcli binary COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse -COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli +COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli \ No newline at end of file From fcf51d691ed3a9bb0e0d6d6050d1a83b81805ef8 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Wed, 2 Aug 2023 23:50:41 +0000 Subject: [PATCH 11/20] fix typo (#4555) --- beacon_node/beacon_chain/src/timeout_rw_lock.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/timeout_rw_lock.rs b/beacon_node/beacon_chain/src/timeout_rw_lock.rs index 28e3f4c29cc..b2eea762651 100644 --- a/beacon_node/beacon_chain/src/timeout_rw_lock.rs +++ b/beacon_node/beacon_chain/src/timeout_rw_lock.rs @@ -5,7 +5,7 @@ use std::time::Duration; /// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a /// time-out (i.e., no indefinitely-blocking operations). /// -/// Timeouts can be optionally be disabled at runtime for all instances of this type by calling +/// Timeouts can be optionally disabled at runtime for all instances of this type by calling /// `TimeoutRwLock::disable_timeouts()`. pub struct TimeoutRwLock(RwLock); From b5e25aeb2fcc4338730b864c8178e14fc2cce232 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Wed, 2 Aug 2023 23:50:42 +0000 Subject: [PATCH 12/20] CommitteeCache.initialized: fail early if possible (#4556) --- consensus/types/src/beacon_state/committee_cache.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 2db8fbe7632..bbe81b93008 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -56,6 +56,11 @@ impl CommitteeCache { return Err(Error::ZeroSlotsPerEpoch); } + // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. + if state.validators().len() == usize::max_value() { + return Err(Error::TooManyValidators); + } + let active_validator_indices = get_active_validator_indices(state.validators(), epoch); if active_validator_indices.is_empty() { @@ -75,11 +80,6 @@ impl CommitteeCache { ) .ok_or(Error::UnableToShuffle)?; - // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. 
- if state.validators().len() == usize::max_value() { - return Err(Error::TooManyValidators); - } - let mut shuffling_positions = vec![<_>::default(); state.validators().len()]; for (i, &v) in shuffling.iter().enumerate() { *shuffling_positions From 7399a54ca301bfc6d0dc2c718a73c49d7a752e61 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Wed, 2 Aug 2023 23:50:43 +0000 Subject: [PATCH 13/20] CommitteeCache.get_all_beacon_committees: set correct capacity to avoid realloc (#4557) --- consensus/types/src/beacon_state/committee_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index bbe81b93008..64bf686f34e 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -174,7 +174,7 @@ impl CommitteeCache { .ok_or(Error::CommitteeCacheUninitialized(None))?; initialized_epoch.slot_iter(self.slots_per_epoch).try_fold( - Vec::with_capacity(self.slots_per_epoch as usize), + Vec::with_capacity(self.epoch_committee_count()), |mut vec, slot| { vec.append(&mut self.get_beacon_committees_at_slot(slot)?); Ok(vec) From 33976121601d9d31d26c1bb2e1f0676e9c4d24fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arma=C4=9Fan=20Y=C4=B1ld=C4=B1rak?= <41453366+armaganyildirak@users.noreply.github.com> Date: Thu, 3 Aug 2023 01:51:47 +0000 Subject: [PATCH 14/20] Shift networking configuration (#4426) ## Issue Addressed Addresses [#4401](https://github.com/sigp/lighthouse/issues/4401) ## Proposed Changes Shift some networking constants into ```ChainSpec``` so they can be configured per network, and remove the hard-coded constant values from the code. ## Additional Info I mostly used ```MainnetEthSpec::default_spec()``` for getting a ```ChainSpec```. I wonder whether I made a mistake there.
Co-authored-by: armaganyildirak Co-authored-by: Paul Hauner Co-authored-by: Age Manning Co-authored-by: Diva M --- Cargo.lock | 1 + .../src/attestation_verification.rs | 20 +- beacon_node/beacon_chain/src/beacon_chain.rs | 5 - .../beacon_chain/src/block_verification.rs | 4 +- beacon_node/beacon_chain/src/lib.rs | 2 +- ...ght_client_finality_update_verification.rs | 7 +- ...t_client_optimistic_update_verification.rs | 7 +- .../src/sync_committee_verification.rs | 17 +- beacon_node/beacon_processor/src/lib.rs | 8 +- .../src/work_reprocessing_queue.rs | 8 +- beacon_node/client/src/builder.rs | 3 +- beacon_node/http_api/src/attester_duties.rs | 6 +- beacon_node/http_api/src/proposer_duties.rs | 4 +- beacon_node/http_api/src/sync_committees.rs | 4 +- beacon_node/http_api/tests/tests.rs | 12 +- beacon_node/lighthouse_network/src/config.rs | 36 ++-- .../lighthouse_network/src/rpc/codec/base.rs | 15 +- .../src/rpc/codec/ssz_snappy.rs | 122 +++++++++--- .../lighthouse_network/src/rpc/handler.rs | 36 ++-- beacon_node/lighthouse_network/src/rpc/mod.rs | 31 ++- .../lighthouse_network/src/rpc/protocol.rs | 20 +- .../lighthouse_network/src/service/mod.rs | 18 +- .../lighthouse_network/tests/common.rs | 11 +- .../lighthouse_network/tests/rpc_tests.rs | 52 +++-- .../gossip_methods.rs | 3 + .../src/network_beacon_processor/mod.rs | 11 -- .../src/network_beacon_processor/tests.rs | 15 +- beacon_node/src/config.rs | 6 +- .../gnosis/config.yaml | 12 +- .../mainnet/config.yaml | 12 +- .../prater/config.yaml | 12 +- .../sepolia/config.yaml | 12 +- consensus/types/Cargo.toml | 1 + consensus/types/src/chain_spec.rs | 185 +++++++++++++++--- consensus/types/src/subnet_id.rs | 4 +- .../environment/tests/testnet_dir/config.yaml | 14 ++ 36 files changed, 523 insertions(+), 213 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b2e8188eec4..13f2b7cd43c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8391,6 +8391,7 @@ dependencies = [ "merkle_proof", "metastruct", "parking_lot 0.12.1", + "paste", "rand 0.8.5", "rand_xorshift", "rayon", diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 6df0758b2e6..5535fec37c4 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -35,10 +35,8 @@ mod batch; use crate::{ - beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, - metrics, - observed_aggregates::ObserveOutcome, - observed_attesters::Error as ObservedAttestersError, + beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, + observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError, BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::verify_signature_sets; @@ -57,8 +55,8 @@ use std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; use types::{ - Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, - SelectionProof, SignedAggregateAndProof, Slot, SubnetId, + Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, + IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, }; pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; @@ -454,7 +452,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. 
- verify_propagation_slot_range(&chain.slot_clock, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; // Check the attestation's epoch matches its target. if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()) @@ -722,7 +720,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(&chain.slot_clock, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -1037,11 +1035,11 @@ fn verify_head_block_is_known( pub fn verify_propagation_slot_range( slot_clock: &S, attestation: &Attestation, + spec: &ChainSpec, ) -> Result<(), Error> { let attestation_slot = attestation.data.slot; - let latest_permissible_slot = slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if attestation_slot > latest_permissible_slot { return Err(Error::FutureSlot { @@ -1052,7 +1050,7 @@ pub fn verify_propagation_slot_range( // Taking advantage of saturating subtraction on `Slot`. let earliest_permissible_slot = slot_clock - .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)? - E::slots_per_epoch(); if attestation_slot < earliest_permissible_slot { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 78f2c3f03b4..25964ed2165 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -217,11 +217,6 @@ pub enum OverrideForkchoiceUpdate { AlreadyApplied, } -/// The accepted clock drift for nodes gossiping blocks and attestations. See: -/// -/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration -pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500); - #[derive(Debug, PartialEq)] pub enum AttestationProcessingOutcome { Processed, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 492f492521e..0a82eae3711 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -59,7 +59,7 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, - MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; @@ -730,7 +730,7 @@ impl GossipVerifiedBlock { // Do not gossip or process blocks from future slots. 
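// A block is tolerated up to `spec.maximum_gossip_clock_disparity()` (500ms by
// default) ahead of the local wall-clock slot before being rejected as a future block.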
let present_slot_with_tolerance = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if block.slot() > present_slot_with_tolerance { return Err(BlockError::FutureSlot { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 85ff0f20a01..4ea1eeee011 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -54,7 +54,7 @@ pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 7c431ebccca..638d2b4012e 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -1,6 +1,4 @@ -use crate::{ - beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, -}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use slot_clock::SlotClock; use std::time::Duration; @@ -103,7 +101,8 @@ impl VerifiedLightClientFinalityUpdate { // verify that enough time has passed for the block to have been propagated match start_time { Some(time) => { - if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() + < time + one_third_slot_duration { return Err(Error::TooEarly); } diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 20d7181808a..2d1a5cf97cf 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -1,6 +1,4 @@ -use crate::{ - beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, -}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use eth2::types::Hash256; use slot_clock::SlotClock; @@ -103,7 +101,8 @@ impl VerifiedLightClientOptimisticUpdate { // verify that enough time has passed for the block to have been propagated match start_time { Some(time) => { - if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() + < time + one_third_slot_duration { return Err(Error::TooEarly); } diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 246bb12cc0e..5c6710bfd6c 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -28,10 +28,8 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::{ - beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, 
VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, - metrics, - observed_aggregates::ObserveOutcome, - BeaconChain, BeaconChainError, BeaconChainTypes, + beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, + observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::{verify_signature_sets, PublicKeyBytes}; use derivative::Derivative; @@ -52,6 +50,7 @@ use tree_hash_derive::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; use types::sync_committee::Error as SyncCommitteeError; +use types::ChainSpec; use types::{ sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, @@ -297,7 +296,7 @@ impl VerifiedSyncContribution { let subcommittee_index = contribution.subcommittee_index as usize; // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. - verify_propagation_slot_range(&chain.slot_clock, contribution)?; + verify_propagation_slot_range(&chain.slot_clock, contribution, &chain.spec)?; // Validate subcommittee index. if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { @@ -460,7 +459,7 @@ impl VerifiedSyncCommitteeMessage { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future sync committee messages for later processing. - verify_propagation_slot_range(&chain.slot_clock, &sync_message)?; + verify_propagation_slot_range(&chain.slot_clock, &sync_message, &chain.spec)?; // Ensure the `subnet_id` is valid for the given validator. let pubkey = chain @@ -576,11 +575,11 @@ impl VerifiedSyncCommitteeMessage { pub fn verify_propagation_slot_range( slot_clock: &S, sync_contribution: &U, + spec: &ChainSpec, ) -> Result<(), Error> { let message_slot = sync_contribution.get_slot(); - let latest_permissible_slot = slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if message_slot > latest_permissible_slot { return Err(Error::FutureSlot { @@ -590,7 +589,7 @@ pub fn verify_propagation_slot_range( } let earliest_permissible_slot = slot_clock - .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; if message_slot < earliest_permissible_slot { diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 88066f2a305..297c4868db7 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -661,7 +661,8 @@ impl BeaconProcessor { work_reprocessing_rx: mpsc::Receiver, work_journal_tx: Option>, slot_clock: S, - ) { + maximum_gossip_clock_disparity: Duration, + ) -> Result<(), String> { // Used by workers to communicate that they are finished a task. let (idle_tx, idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN); @@ -717,13 +718,15 @@ impl BeaconProcessor { // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = mpsc::channel::(MAX_SCHEDULED_WORK_QUEUE_LEN); + spawn_reprocess_scheduler( ready_work_tx, work_reprocessing_rx, &self.executor, slot_clock, self.log.clone(), - ); + maximum_gossip_clock_disparity, + )?; let executor = self.executor.clone(); @@ -1203,6 +1206,7 @@ impl BeaconProcessor { // Spawn on the core executor. 
executor.spawn(manager_future, MANAGER_TASK_NAME); + Ok(()) } /// Spawns a blocking worker thread to process some `Work`. diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs index 608f634d537..9191509d39f 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -361,7 +361,12 @@ pub fn spawn_reprocess_scheduler( executor: &TaskExecutor, slot_clock: S, log: Logger, -) { + maximum_gossip_clock_disparity: Duration, +) -> Result<(), String> { + // Sanity check + if ADDITIONAL_QUEUED_BLOCK_DELAY >= maximum_gossip_clock_disparity { + return Err("The block delay and gossip disparity don't match.".to_string()); + } let mut queue = ReprocessQueue { work_reprocessing_rx, ready_work_tx, @@ -400,6 +405,7 @@ pub fn spawn_reprocess_scheduler( }, TASK_NAME, ); + Ok(()) } impl ReprocessQueue { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 14edbb97309..71a9b28fb05 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -795,7 +795,8 @@ where self.work_reprocessing_rx, None, beacon_chain.slot_clock.clone(), - ); + beacon_chain.spec.maximum_gossip_clock_disparity(), + )?; } let state_advance_context = runtime_context.service_context("state_advance".into()); diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 5c3e420839d..aad405d56ba 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -1,9 +1,7 @@ //! Contains the handler for the `GET validator/duties/attester/{epoch}` endpoint. use crate::state_id::StateId; -use beacon_chain::{ - BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, -}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{self as api_types}; use slot_clock::SlotClock; use state_processing::state_advance::partial_state_advance; @@ -32,7 +30,7 @@ pub fn attester_duties( // will equal `current_epoch + 1` let tolerant_current_epoch = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? .epoch(T::EthSpec::slots_per_epoch()); diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 7e946b89e72..708df39b4d6 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -3,7 +3,7 @@ use crate::state_id::StateId; use beacon_chain::{ beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, - BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + BeaconChain, BeaconChainError, BeaconChainTypes, }; use eth2::types::{self as api_types}; use safe_arith::SafeArith; @@ -33,7 +33,7 @@ pub fn proposer_duties( // will equal `current_epoch + 1` let tolerant_current_epoch = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? 
.epoch(T::EthSpec::slots_per_epoch()); diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 07dfb5c988f..dcf41429f6d 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -6,7 +6,7 @@ use beacon_chain::sync_committee_verification::{ }; use beacon_chain::{ validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes, - StateSkipConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + StateSkipConfig, }; use eth2::types::{self as api_types}; use lighthouse_network::PubsubMessage; @@ -85,7 +85,7 @@ fn duties_from_state_load( let current_epoch = chain.epoch()?; let tolerant_current_epoch = chain .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)? .epoch(T::EthSpec::slots_per_epoch()); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7c3872925a3..28eb106e8df 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,7 +1,7 @@ use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + BeaconChain, StateSkipConfig, WhenSlotSkipped, }; use environment::null_logger; use eth2::{ @@ -2313,7 +2313,9 @@ impl ApiTester { .unwrap(); self.chain.slot_clock.set_current_time( - current_epoch_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1), + current_epoch_start + - self.chain.spec.maximum_gossip_clock_disparity() + - Duration::from_millis(1), ); let dependent_root = self @@ -2350,9 +2352,9 @@ impl ApiTester { "should not get attester duties outside of tolerance" ); - self.chain - .slot_clock - .set_current_time(current_epoch_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY); + self.chain.slot_clock.set_current_time( + current_epoch_start - self.chain.spec.maximum_gossip_clock_disparity(), + ); self.client .get_validator_duties_proposer(current_epoch) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 6c8f20a24b9..0ab7c03e7f6 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -16,11 +16,6 @@ use std::sync::Arc; use std::time::Duration; use types::{ForkContext, ForkName}; -/// The maximum transmit size of gossip messages in bytes pre-merge. -const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M -/// The maximum transmit size of gossip messages in bytes post-merge. -const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M - /// The cache time is set to accommodate the circulation time of an attestation. /// /// The p2p spec declares that we accept attestations within the following range: @@ -35,20 +30,20 @@ const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M /// another 500ms for "fudge factor". pub const DUPLICATE_CACHE_TIME: Duration = Duration::from_secs(33 * 12 + 1); -// We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the -// specification. We leave it here for posterity. -// const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0]; -const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0]; - /// The maximum size of gossip messages. 
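/// Post-merge the limit is the spec's `gossip_max_size` itself (10485760 bytes on the
/// built-in networks); pre-merge it is one tenth of that, reproducing the old
/// 1 MiB / 10 MiB constants.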
-pub fn gossip_max_size(is_merge_enabled: bool) -> usize { +pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { if is_merge_enabled { - GOSSIP_MAX_SIZE_POST_MERGE + gossip_max_size } else { - GOSSIP_MAX_SIZE + gossip_max_size / 10 } } +pub struct GossipsubConfigParams { + pub message_domain_valid_snappy: [u8; 4], + pub gossip_max_size: usize, +} + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] /// Network configuration for lighthouse. @@ -413,7 +408,11 @@ impl From for NetworkLoad { } /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. -pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gossipsub::Config { +pub fn gossipsub_config( + network_load: u8, + fork_context: Arc, + gossipsub_config_params: GossipsubConfigParams, +) -> gossipsub::Config { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(topic, data) for content addressing let fast_gossip_message_id = |message: &gossipsub::RawMessage| { @@ -446,12 +445,12 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gos } } } - + let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); let gossip_message_id = move |message: &gossipsub::Message| { gossipsub::MessageId::from( &Sha256::digest( - prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), + prefix(message_domain_valid_snappy, message, fork_context.clone()).as_slice(), )[..20], ) }; @@ -459,7 +458,10 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gos let load = NetworkLoad::from(network_load); gossipsub::ConfigBuilder::default() - .max_transmit_size(gossip_max_size(is_merge_enabled)) + .max_transmit_size(gossip_max_size( + is_merge_enabled, + gossipsub_config_params.gossip_max_size, + )) .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) .mesh_n_low(load.mesh_n_low) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index d568f27897e..943d4a3bce2 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -217,9 +217,12 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(ForkName::Base)); + + let chain_spec = Spec::default_spec(); + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context), + max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), fork_context, ); @@ -251,9 +254,12 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(ForkName::Base)); + + let chain_spec = Spec::default_spec(); + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context), + max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), fork_context, ); @@ -279,7 +285,10 @@ mod tests { // Response limits let fork_context = Arc::new(fork_context(ForkName::Base)); - let max_rpc_size = max_rpc_size(&fork_context); + + let chain_spec = Spec::default_spec(); + + let max_rpc_size = max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize); let limit = protocol_id.rpc_response_limits::(&fork_context); let mut max = encode_len(limit.max + 
1); let mut codec = SSZSnappyOutboundCodec::::new( diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 39cf8b3eb29..f1d94da7ece 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -615,8 +615,8 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, - ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, + Epoch, ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; @@ -658,7 +658,7 @@ mod tests { } /// Merge block with length < max_rpc_size. - fn merge_block_small(fork_context: &ForkContext) -> SignedBeaconBlock { + fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockMerge<_, FullPayload> = BeaconBlockMerge::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); @@ -667,14 +667,14 @@ mod tests { block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); SignedBeaconBlock::from_block(block, Signature::empty()) } /// Merge block with length > MAX_RPC_SIZE. /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. - fn merge_block_large(fork_context: &ForkContext) -> SignedBeaconBlock { + fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockMerge<_, FullPayload> = BeaconBlockMerge::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); @@ -683,7 +683,7 @@ mod tests { block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); SignedBeaconBlock::from_block(block, Signature::empty()) } @@ -737,10 +737,11 @@ mod tests { protocol: SupportedProtocol, message: RPCCodedResponse, fork_name: ForkName, + spec: &ChainSpec, ) -> Result { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context); + let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let mut buf = BytesMut::new(); let mut snappy_inbound_codec = @@ -783,10 +784,11 @@ mod tests { protocol: SupportedProtocol, message: &mut BytesMut, fork_name: ForkName, + spec: &ChainSpec, ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context); + let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); // decode message just as snappy message @@ -798,15 +800,20 @@ mod tests { protocol: SupportedProtocol, message: 
RPCCodedResponse, fork_name: ForkName, + spec: &ChainSpec, ) -> Result>, RPCError> { - let mut encoded = encode_response(protocol, message, fork_name)?; - decode_response(protocol, &mut encoded, fork_name) + let mut encoded = encode_response(protocol, message, fork_name, spec)?; + decode_response(protocol, &mut encoded, fork_name, spec) } /// Verifies that requests we send are encoded in a way that we would correctly decode too. - fn encode_then_decode_request(req: OutboundRequest, fork_name: ForkName) { + fn encode_then_decode_request( + req: OutboundRequest, + fork_name: ForkName, + spec: &ChainSpec, + ) { let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context); + let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); // Encode a request we send let mut buf = BytesMut::new(); @@ -851,11 +858,14 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v1() { + let chain_spec = Spec::default_spec(); + assert_eq!( encode_then_decode_response( SupportedProtocol::StatusV1, RPCCodedResponse::Success(RPCResponse::Status(status_message())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::Status(status_message()))) ); @@ -865,6 +875,7 @@ mod tests { SupportedProtocol::PingV1, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::Pong(ping_message()))) ); @@ -874,6 +885,7 @@ mod tests { SupportedProtocol::BlocksByRangeV1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() @@ -886,6 +898,7 @@ mod tests { SupportedProtocol::BlocksByRangeV1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_) @@ -898,6 +911,7 @@ mod tests { SupportedProtocol::BlocksByRootV1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot( Arc::new(empty_base_block()) @@ -910,6 +924,7 @@ mod tests { SupportedProtocol::BlocksByRootV1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_) @@ -922,6 +937,7 @@ mod tests { SupportedProtocol::MetaDataV1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata()))), ); @@ -932,6 +948,7 @@ mod tests { SupportedProtocol::MetaDataV1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata()))), ); @@ -940,11 +957,14 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v2() { + let chain_spec = Spec::default_spec(); + assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() @@ -959,6 +979,7 @@ mod tests { SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ), 
Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() @@ -970,12 +991,13 @@ mod tests { SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) ); - let merge_block_small = merge_block_small(&fork_context(ForkName::Merge)); - let merge_block_large = merge_block_large(&fork_context(ForkName::Merge)); + let merge_block_small = merge_block_small(&fork_context(ForkName::Merge), &chain_spec); + let merge_block_large = merge_block_large(&fork_context(ForkName::Merge), &chain_spec); assert_eq!( encode_then_decode_response( @@ -984,6 +1006,7 @@ mod tests { merge_block_small.clone() ))), ForkName::Merge, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() @@ -1000,6 +1023,7 @@ mod tests { SupportedProtocol::BlocksByRangeV2, &mut encoded, ForkName::Merge, + &chain_spec, ) .unwrap_err(), RPCError::InvalidData(_) @@ -1012,6 +1036,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot( Arc::new(empty_base_block()) @@ -1026,6 +1051,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot( Arc::new(empty_base_block()) @@ -1037,6 +1063,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) ); @@ -1048,6 +1075,7 @@ mod tests { merge_block_small.clone() ))), ForkName::Merge, + &chain_spec, ), Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small)))) ); @@ -1062,6 +1090,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, &mut encoded, ForkName::Merge, + &chain_spec, ) .unwrap_err(), RPCError::InvalidData(_) @@ -1075,6 +1104,7 @@ mod tests { SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Base, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata_v2()))) ); @@ -1084,6 +1114,7 @@ mod tests { SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), ForkName::Altair, + &chain_spec, ), Ok(Some(RPCResponse::MetaData(metadata_v2()))) ); @@ -1094,11 +1125,14 @@ mod tests { fn test_context_bytes_v2() { let fork_context = fork_context(ForkName::Altair); + let chain_spec = Spec::default_spec(); + // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ) .unwrap(); @@ -1108,7 +1142,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut encoded_bytes, - ForkName::Base + ForkName::Base, + &chain_spec, ) .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -1118,6 +1153,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, + &chain_spec, ) .unwrap(); @@ -1127,7 +1163,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut encoded_bytes, - ForkName::Base + ForkName::Base, + &chain_spec, ) 
.unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -1138,6 +1175,7 @@ mod tests { SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1150,7 +1188,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_), @@ -1161,6 +1200,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1172,7 +1212,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::SSZDecodeError(_), @@ -1186,6 +1227,7 @@ mod tests { SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Altair, + &chain_spec, ) .unwrap(), ); @@ -1193,7 +1235,8 @@ mod tests { assert!(decode_response( SupportedProtocol::MetaDataV2, &mut encoded_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .is_err()); @@ -1202,6 +1245,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1213,7 +1257,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -1224,6 +1269,7 @@ mod tests { SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, + &chain_spec, ) .unwrap(); @@ -1233,7 +1279,8 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut part, - ForkName::Altair + ForkName::Altair, + &chain_spec, ), Ok(None) ) @@ -1252,9 +1299,12 @@ mod tests { OutboundRequest::MetaData(MetadataRequest::new_v1()), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; + + let chain_spec = Spec::default_spec(); + for req in requests.iter() { for fork_name in ForkName::list_all() { - encode_then_decode_request(req.clone(), fork_name); + encode_then_decode_request(req.clone(), fork_name, &chain_spec); } } } @@ -1308,9 +1358,16 @@ mod tests { assert_eq!(writer.get_ref().len(), 42); dst.extend_from_slice(writer.get_ref()); + let chain_spec = Spec::default_spec(); // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. assert!(matches!( - decode_response(SupportedProtocol::StatusV1, &mut dst, ForkName::Base).unwrap_err(), + decode_response( + SupportedProtocol::StatusV1, + &mut dst, + ForkName::Base, + &chain_spec + ) + .unwrap_err(), RPCError::InvalidData(_) )); } @@ -1365,12 +1422,15 @@ mod tests { assert_eq!(writer.get_ref().len(), 8103); dst.extend_from_slice(writer.get_ref()); + let chain_spec = Spec::default_spec(); + // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
assert!(matches!( decode_response( SupportedProtocol::BlocksByRangeV2, &mut dst, - ForkName::Altair + ForkName::Altair, + &chain_spec, ) .unwrap_err(), RPCError::InvalidData(_) )); } @@ -1398,8 +1458,12 @@ let mut uvi_codec: Uvi = Uvi::default(); let mut dst = BytesMut::with_capacity(1024); + let chain_spec = Spec::default_spec(); + // Insert length-prefix - uvi_codec.encode(MAX_RPC_SIZE + 1, &mut dst).unwrap(); + uvi_codec + .encode(chain_spec.max_chunk_size as usize + 1, &mut dst) + .unwrap(); // Insert snappy stream identifier dst.extend_from_slice(stream_identifier); // Insert payload (42 bytes of data). let data = vec![1; 42]; let mut writer = FrameEncoder::new(Vec::new()); writer.write_all(&data).unwrap(); writer.flush().unwrap(); dst.extend_from_slice(writer.get_ref()); assert!(matches!( decode_response( SupportedProtocol::StatusV1, &mut dst, ForkName::Base, &chain_spec ) .unwrap_err(), RPCError::InvalidData(_) )); } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index d42248ad5f6..36a5abc0863 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -3,9 +3,7 @@ use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination}; use super::outbound::OutboundRequestContainer; -use super::protocol::{ - max_rpc_size, InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol, -}; +use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; @@ -31,9 +29,6 @@ use tokio::time::{sleep_until, Instant as TInstant, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; use types::{EthSpec, ForkContext}; -/// The time (in seconds) before a substream that is awaiting a response from the user times out. -pub const RESPONSE_TIMEOUT: u64 = 10; /// The number of times to retry an outbound upgrade in the case of IO errors. const IO_ERROR_RETRIES: u8 = 3; @@ -131,6 +126,9 @@ where /// Logger for handling RPC streams log: slog::Logger, + + /// Timeout that will be used for inbound and outbound responses. + resp_timeout: Duration, } enum HandlerState { @@ -212,7 +210,8 @@ where pub fn new( listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, - log: slog::Logger, + log: &slog::Logger, + resp_timeout: Duration, ) -> Self { RPCHandler { listen_protocol, @@ -230,7 +229,8 @@ where outbound_io_error_retries: 0, fork_context, waker: None, - log, + log: log.clone(), + resp_timeout, } } @@ -554,7 +554,7 @@ where // Each chunk is allowed RESPONSE_TIMEOUT to be sent. if let Some(ref delay_key) = info.delay_key { self.inbound_substreams_delay - .reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT)); + .reset(delay_key, self.resp_timeout); } // The stream may be currently idle.
Attempt to process more @@ -688,7 +688,7 @@ where }; substream_entry.remaining_chunks = Some(remaining_chunks); self.outbound_substreams_delay - .reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT)); + .reset(delay_key, self.resp_timeout); } } else { // either this is a single response request or this response closes the @@ -811,7 +811,7 @@ where OutboundRequestContainer { req: req.clone(), fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), + max_rpc_size: self.listen_protocol().upgrade().max_rpc_size, }, (), ) @@ -896,10 +896,9 @@ where if expected_responses > 0 { if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { // Store the stream and tag the output. - let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); + let delay_key = self + .inbound_substreams_delay + .insert(self.current_inbound_substream_id, self.resp_timeout); let awaiting_stream = InboundState::Idle(substream); self.inbound_substreams.insert( self.current_inbound_substream_id, @@ -961,10 +960,9 @@ where let expected_responses = request.expected_responses(); if expected_responses > 0 { // new outbound request. Store the stream and tag the output. - let delay_key = self.outbound_substreams_delay.insert( - self.current_outbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); + let delay_key = self + .outbound_substreams_delay + .insert(self.current_outbound_substream_id, self.resp_timeout); let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { substream: Box::new(substream), request, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 4fd9b516d4c..14f77e4ba23 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -17,6 +17,7 @@ use slog::{crit, debug, o}; use std::marker::PhantomData; use std::sync::Arc; use std::task::{Context, Poll}; +use std::time::Duration; use types::{EthSpec, ForkContext}; pub(crate) use handler::HandlerErr; @@ -107,6 +108,12 @@ pub struct RPCMessage { type BehaviourAction = ToSwarm, RPCSend>; +pub struct NetworkParams { + pub max_chunk_size: usize, + pub ttfb_timeout: Duration, + pub resp_timeout: Duration, +} + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { @@ -120,6 +127,8 @@ pub struct RPC { enable_light_client_server: bool, /// Slog logger for RPC behaviour. log: slog::Logger, + /// Networking constant values + network_params: NetworkParams, } impl RPC { @@ -129,6 +138,7 @@ impl RPC { inbound_rate_limiter_config: Option, outbound_rate_limiter_config: Option, log: slog::Logger, + network_params: NetworkParams, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); @@ -149,6 +159,7 @@ impl RPC { fork_context, enable_light_client_server, log, + network_params, } } @@ -220,16 +231,22 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), + max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size), enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, + ttfb_timeout: self.network_params.ttfb_timeout, }, (), ); // NOTE: this is needed because PeerIds have interior mutability. 
let peer_repr = peer_id.to_string(); let log = self.log.new(slog::o!("peer_id" => peer_repr)); - let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + let handler = RPCHandler::new( + protocol, + self.fork_context.clone(), + &log, + self.network_params.resp_timeout, + ); Ok(handler) } @@ -244,9 +261,10 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context), + max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size), enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, + ttfb_timeout: self.network_params.ttfb_timeout, }, (), ); @@ -254,7 +272,12 @@ where // NOTE: this is needed because PeerIds have interior mutability. let peer_repr = peer_id.to_string(); let log = self.log.new(slog::o!("peer_id" => peer_repr)); - let handler = RPCHandler::new(protocol, self.fork_context.clone(), log); + let handler = RPCHandler::new( + protocol, + self.fork_context.clone(), + &log, + self.network_params.resp_timeout, + ); Ok(handler) } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 22f9f19d680..f2a39470b94 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -72,7 +72,7 @@ lazy_static! { /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network - /// with `MAX_RPC_SIZE_POST_MERGE`. + /// with `max_chunk_size`. pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX @@ -109,25 +109,18 @@ lazy_static! { .len(); } -/// The maximum bytes that can be sent across the RPC pre-merge. -pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M -/// The maximum bytes that can be sent across the RPC post-merge. -pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M -pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; -/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). -const TTFB_TIMEOUT: u64 = 5; /// The number of seconds to wait for the first bytes of a request once a protocol has been /// established before the stream is terminated. const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. 
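/// Base and Altair responses are capped at one tenth of `max_chunk_size` (1 MiB with
/// the default 10485760 bytes), while Merge and Capella use the full `max_chunk_size`,
/// mirroring the old `MAX_RPC_SIZE` and `MAX_RPC_SIZE_POST_MERGE` constants.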
-pub fn max_rpc_size(fork_context: &ForkContext) -> usize { +pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize { match fork_context.current_fork() { - ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, - ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, - ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA, + ForkName::Altair | ForkName::Base => max_chunk_size / 10, + ForkName::Merge => max_chunk_size, + ForkName::Capella => max_chunk_size, } } @@ -262,6 +255,7 @@ pub struct RPCProtocol { pub max_rpc_size: usize, pub enable_light_client_server: bool, pub phantom: PhantomData, + pub ttfb_timeout: Duration, } impl UpgradeInfo for RPCProtocol { @@ -447,7 +441,7 @@ where } }; let mut timed_socket = TimeoutStream::new(socket); - timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); + timed_socket.set_read_timeout(Some(self.ttfb_timeout)); let socket = Framed::new(Box::pin(timed_socket), codec); diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 1a25beee0a7..63e5bcbff66 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,6 +1,6 @@ use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; -use crate::config::{gossipsub_config, NetworkLoad}; +use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, }; @@ -232,7 +232,15 @@ impl Network { max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 }; - config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); + let gossipsub_config_params = GossipsubConfigParams { + message_domain_valid_snappy: ctx.chain_spec.message_domain_valid_snappy, + gossip_max_size: ctx.chain_spec.gossip_max_size as usize, + }; + config.gs_config = gossipsub_config( + config.network_load, + ctx.fork_context.clone(), + gossipsub_config_params, + ); // If metrics are enabled for gossipsub build the configuration let gossipsub_metrics = ctx @@ -256,12 +264,18 @@ impl Network { (gossipsub, update_gossipsub_scores) }; + let network_params = NetworkParams { + max_chunk_size: ctx.chain_spec.max_chunk_size as usize, + ttfb_timeout: ctx.chain_spec.ttfb_timeout(), + resp_timeout: ctx.chain_spec.resp_timeout(), + }; let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, config.inbound_rate_limiter_config.clone(), config.outbound_rate_limiter_config.clone(), log.clone(), + network_params, ); let discovery = { diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index b48891335cc..36a2e523855 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -94,6 +94,7 @@ pub async fn build_libp2p_instance( boot_nodes: Vec, log: slog::Logger, fork_name: ForkName, + spec: &ChainSpec, ) -> Libp2pInstance { let port = unused_tcp4_port().unwrap(); let config = build_config(port, boot_nodes); @@ -106,7 +107,7 @@ pub async fn build_libp2p_instance( config: &config, enr_fork_id: EnrForkId::default(), fork_context: Arc::new(fork_context(fork_name)), - chain_spec: &ChainSpec::minimal(), + chain_spec: spec, gossipsub_registry: None, }; Libp2pInstance( @@ -130,12 +131,13 @@ pub async fn build_node_pair( rt: Weak, log: &slog::Logger, fork_name: ForkName, + spec: &ChainSpec, ) -> 
(Libp2pInstance, Libp2pInstance) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await; - let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await; + let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec).await; + let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec).await; let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone(); @@ -180,10 +182,11 @@ pub async fn build_linear( log: slog::Logger, n: usize, fork_name: ForkName, + spec: &ChainSpec, ) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await); + nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec).await); } let multiaddrs: Vec = nodes diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 656df0c4a16..05fa5ab8542 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -9,8 +9,9 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, EthSpec, - ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, + Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, + Slot, }; mod common; @@ -18,30 +19,30 @@ mod common; type E = MinimalEthSpec; /// Merge block with length < max_rpc_size. -fn merge_block_small(fork_context: &ForkContext) -> BeaconBlock { - let mut block = BeaconBlockMerge::::empty(&E::default_spec()); +fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { + let mut block = BeaconBlockMerge::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); block } /// Merge block with length > MAX_RPC_SIZE. /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. 
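/// (100,000 copies of a 1024-byte transaction put the SSZ length around 100 MB, far
/// above the 10 MiB default `max_chunk_size`.)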
-fn merge_block_large(fork_context: &ForkContext) -> BeaconBlock { - let mut block = BeaconBlockMerge::::empty(&E::default_spec()); +fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { + let mut block = BeaconBlockMerge::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context)); + assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); block } @@ -57,10 +58,12 @@ fn test_status_rpc() { let log = common::build_log(log_level, enable_logging); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // Dummy STATUS RPC message let rpc_request = Request::Status(StatusMessage { @@ -149,10 +152,12 @@ fn test_blocks_by_range_chunked_rpc() { let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); @@ -168,7 +173,7 @@ fn test_blocks_by_range_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); + let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -273,16 +278,18 @@ fn test_blocks_by_range_over_limit() { let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); // BlocksByRange Response - let full_block = merge_block_large(&common::fork_context(ForkName::Merge)); + let full_block = merge_block_large(&common::fork_context(ForkName::Merge), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -355,10 +362,12 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); @@ -475,10 
+484,12 @@ fn test_blocks_by_range_single_empty_rpc() { let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); + let spec = E::default_spec(); + rt.block_on(async { // get sender/receiver let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); @@ -579,7 +590,7 @@ fn test_blocks_by_root_chunked_rpc() { // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; // BlocksByRoot Request let rpc_request = @@ -601,7 +612,7 @@ fn test_blocks_by_root_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); + let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); @@ -706,7 +717,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // BlocksByRoot Request let rpc_request = @@ -833,10 +844,13 @@ fn test_goodbye_rpc() { let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); + + let spec = E::default_spec(); + // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; // build the sender future let sender_future = async { diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index cde4da9ffcc..cb4d6f9c2bd 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1634,6 +1634,7 @@ impl NetworkBeaconProcessor { attestation_verification::verify_propagation_slot_range( seen_clock, failed_att.attestation(), + &self.chain.spec, ); // Only penalize the peer if it would have been invalid at the moment we received @@ -2182,6 +2183,7 @@ impl NetworkBeaconProcessor { sync_committee_verification::verify_propagation_slot_range( seen_clock, &sync_committee_message_slot, + &self.chain.spec, ); hindsight_verification.is_err() }; @@ -2494,6 +2496,7 @@ impl NetworkBeaconProcessor { let is_timely = attestation_verification::verify_propagation_slot_range( &self.chain.slot_clock, attestation, + &self.chain.spec, ) .is_ok(); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7f0ef1fb817..db83bfc1645 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -577,14 
+577,3 @@ impl NetworkBeaconProcessor> { (network_beacon_processor, beacon_processor_receive) } } - -#[cfg(test)] -mod test { - #[test] - fn queued_block_delay_is_sane() { - assert!( - beacon_processor::work_reprocessing_queue::ADDITIONAL_QUEUED_BLOCK_DELAY - < beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY - ); - } -} diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index b8d5db568ec..dbe93de1ea9 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -11,7 +11,7 @@ use crate::{ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use beacon_chain::{BeaconChain, ChainConfig}; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, @@ -215,7 +215,7 @@ impl TestRig { }; let network_beacon_processor = Arc::new(network_beacon_processor); - BeaconProcessor { + let beacon_processor = BeaconProcessor { network_globals, executor, max_workers: cmp::max(1, num_cpus::get()), @@ -229,8 +229,11 @@ impl TestRig { work_reprocessing_rx, Some(work_journal_tx), harness.chain.slot_clock.clone(), + chain.spec.maximum_gossip_clock_disparity(), ); + assert!(!beacon_processor.is_err()); + Self { chain, next_block: Arc::new(next_block), @@ -505,7 +508,7 @@ async fn import_gossip_block_acceptably_early() { rig.chain .slot_clock - .set_current_time(slot_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY); + .set_current_time(slot_start - rig.chain.spec.maximum_gossip_clock_disparity()); assert_eq!( rig.chain.slot().unwrap(), @@ -552,9 +555,9 @@ async fn import_gossip_block_unacceptably_early() { .start_of(rig.next_block.slot()) .unwrap(); - rig.chain - .slot_clock - .set_current_time(slot_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1)); + rig.chain.slot_clock.set_current_time( + slot_start - rig.chain.spec.maximum_gossip_clock_disparity() - Duration::from_millis(1), + ); assert_eq!( rig.chain.slot().unwrap(), diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 4abf649bfbb..c16b1675a9f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -581,8 +581,10 @@ pub fn get_config( }; } - client_config.chain.max_network_size = - lighthouse_network::gossip_max_size(spec.bellatrix_fork_epoch.is_some()); + client_config.chain.max_network_size = lighthouse_network::gossip_max_size( + spec.bellatrix_fork_epoch.is_some(), + spec.gossip_max_size as usize, + ); if cli_args.is_present("slasher") { let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 0fdc159ec2b..8e7a9dd07a4 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -89,4 +89,14 @@ DEPOSIT_CONTRACT_ADDRESS: 0x0B98057eA310F4d31F2a452B414647007d1645d9 # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 4 \ No newline at end of file +SUBNETS_PER_NODE: 4 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 
+MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 7b26b30a6ce..98984f3b7db 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -89,4 +89,14 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 \ No newline at end of file +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 63b3d45db9a..a0dd85fec07 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -89,4 +89,14 @@ DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 \ No newline at end of file +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 8489f085f4c..e3674cf7df5 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -77,4 +77,14 @@ DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D # Network # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 \ No newline at end of file +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index ba15f6d4885..f030f2e97a6 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -60,6 +60,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } state_processing = { path = "../state_processing" } tokio = "1.14.0" +paste = "1.0.14" [features] default = ["sqlite", "legacy-arith"] diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index fbb6a3d8573..a13d3116d8b 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -6,6 +6,7 @@ use serde_derive::Deserialize; use 
serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; +use std::time::Duration; use tree_hash::TreeHash; /// Each of the BLS signature domains. @@ -170,7 +171,15 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub subnets_per_node: u8, pub epochs_per_subnet_subscription: u64, + pub gossip_max_size: u64, + pub min_epochs_for_block_requests: u64, + pub max_chunk_size: u64, + pub ttfb_timeout: u64, + pub resp_timeout: u64, + pub message_domain_invalid_snappy: [u8; 4], + pub message_domain_valid_snappy: [u8; 4], pub attestation_subnet_extra_bits: u8, + pub attestation_subnet_prefix_bits: u8, /* * Application params @@ -451,10 +460,16 @@ impl ChainSpec { Hash256::from(domain) } - #[allow(clippy::arithmetic_side_effects)] - pub const fn attestation_subnet_prefix_bits(&self) -> u32 { - let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2(); - self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits + pub fn maximum_gossip_clock_disparity(&self) -> Duration { + Duration::from_millis(self.maximum_gossip_clock_disparity_millis) + } + + pub fn ttfb_timeout(&self) -> Duration { + Duration::from_secs(self.ttfb_timeout) + } + + pub fn resp_timeout(&self) -> Duration { + Duration::from_secs(self.resp_timeout) } /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. @@ -617,8 +632,15 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_subnet_subscription: 256, - attestation_subnet_extra_bits: 0, - + gossip_max_size: default_gossip_max_size(), + min_epochs_for_block_requests: default_min_epochs_for_block_requests(), + max_chunk_size: default_max_chunk_size(), + ttfb_timeout: default_ttfb_timeout(), + resp_timeout: default_resp_timeout(), + message_domain_invalid_snappy: default_message_domain_invalid_snappy(), + message_domain_valid_snappy: default_message_domain_valid_snappy(), + attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), + attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), /* * Application specific */ @@ -842,7 +864,15 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_subnet_subscription: 256, - attestation_subnet_extra_bits: 0, + gossip_max_size: default_gossip_max_size(), + min_epochs_for_block_requests: default_min_epochs_for_block_requests(), + max_chunk_size: default_max_chunk_size(), + ttfb_timeout: default_ttfb_timeout(), + resp_timeout: default_resp_timeout(), + message_domain_invalid_snappy: default_message_domain_invalid_snappy(), + message_domain_valid_snappy: default_message_domain_valid_snappy(), + attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), + attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), /* * Application specific @@ -953,6 +983,34 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] deposit_network_id: u64, deposit_contract_address: Address, + + #[serde(default = "default_gossip_max_size")] + #[serde(with = "serde_utils::quoted_u64")] + gossip_max_size: u64, + #[serde(default = "default_min_epochs_for_block_requests")] + #[serde(with = "serde_utils::quoted_u64")] + min_epochs_for_block_requests: u64, + #[serde(default = "default_max_chunk_size")] + #[serde(with = "serde_utils::quoted_u64")] + max_chunk_size: u64, + #[serde(default = "default_ttfb_timeout")] + #[serde(with = "serde_utils::quoted_u64")] + ttfb_timeout: u64, + #[serde(default = 
"default_resp_timeout")] + #[serde(with = "serde_utils::quoted_u64")] + resp_timeout: u64, + #[serde(default = "default_message_domain_invalid_snappy")] + #[serde(with = "serde_utils::bytes_4_hex")] + message_domain_invalid_snappy: [u8; 4], + #[serde(default = "default_message_domain_valid_snappy")] + #[serde(with = "serde_utils::bytes_4_hex")] + message_domain_valid_snappy: [u8; 4], + #[serde(default = "default_attestation_subnet_extra_bits")] + #[serde(with = "serde_utils::quoted_u8")] + attestation_subnet_extra_bits: u8, + #[serde(default = "default_attestation_subnet_prefix_bits")] + #[serde(with = "serde_utils::quoted_u8")] + attestation_subnet_prefix_bits: u8, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -993,6 +1051,42 @@ fn default_subnets_per_node() -> u8 { 2u8 } +const fn default_gossip_max_size() -> u64 { + 10485760 +} + +const fn default_min_epochs_for_block_requests() -> u64 { + 33024 +} + +const fn default_max_chunk_size() -> u64 { + 10485760 +} + +const fn default_ttfb_timeout() -> u64 { + 5 +} + +const fn default_resp_timeout() -> u64 { + 10 +} + +const fn default_message_domain_invalid_snappy() -> [u8; 4] { + [0, 0, 0, 0] +} + +const fn default_message_domain_valid_snappy() -> [u8; 4] { + [1, 0, 0, 0] +} + +const fn default_attestation_subnet_extra_bits() -> u8 { + 0 +} + +const fn default_attestation_subnet_prefix_bits() -> u8 { + 6 +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); @@ -1088,6 +1182,16 @@ impl Config { deposit_chain_id: spec.deposit_chain_id, deposit_network_id: spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, + + gossip_max_size: spec.gossip_max_size, + min_epochs_for_block_requests: spec.min_epochs_for_block_requests, + max_chunk_size: spec.max_chunk_size, + ttfb_timeout: spec.ttfb_timeout, + resp_timeout: spec.resp_timeout, + message_domain_invalid_snappy: spec.message_domain_invalid_snappy, + message_domain_valid_snappy: spec.message_domain_valid_snappy, + attestation_subnet_extra_bits: spec.attestation_subnet_extra_bits, + attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, } } @@ -1132,6 +1236,15 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gossip_max_size, + min_epochs_for_block_requests, + max_chunk_size, + ttfb_timeout, + resp_timeout, + message_domain_invalid_snappy, + message_domain_valid_snappy, + attestation_subnet_extra_bits, + attestation_subnet_prefix_bits, } = self; if preset_base != T::spec_name().to_string().as_str() { @@ -1169,6 +1282,15 @@ impl Config { terminal_block_hash, terminal_block_hash_activation_epoch, safe_slots_to_import_optimistically, + gossip_max_size, + min_epochs_for_block_requests, + max_chunk_size, + ttfb_timeout, + resp_timeout, + message_domain_invalid_snappy, + message_domain_valid_snappy, + attestation_subnet_extra_bits, + attestation_subnet_prefix_bits, ..chain_spec.clone() }) } @@ -1306,6 +1428,7 @@ mod tests { #[cfg(test)] mod yaml_tests { use super::*; + use paste::paste; use tempfile::NamedTempFile; #[test] @@ -1410,29 +1533,35 @@ mod yaml_tests { "#; let chain_spec: Config = serde_yaml::from_str(spec).unwrap(); - assert_eq!( - chain_spec.terminal_total_difficulty, - default_terminal_total_difficulty() - ); - assert_eq!( - chain_spec.terminal_block_hash, - default_terminal_block_hash() - ); - assert_eq!( - chain_spec.terminal_block_hash_activation_epoch, - default_terminal_block_hash_activation_epoch() - ); - assert_eq!( - 
chain_spec.safe_slots_to_import_optimistically, - default_safe_slots_to_import_optimistically() - ); - assert_eq!(chain_spec.bellatrix_fork_epoch, None); + // Asserts that `chain_spec.$name` and `default_$name()` are equal. + macro_rules! check_default { + ($name: ident) => { + paste! { + assert_eq!( + chain_spec.$name, + [](), + "{} does not match default", stringify!($name)); + } + }; + } - assert_eq!( - chain_spec.bellatrix_fork_version, - default_bellatrix_fork_version() - ); + check_default!(terminal_total_difficulty); + check_default!(terminal_block_hash); + check_default!(terminal_block_hash_activation_epoch); + check_default!(safe_slots_to_import_optimistically); + check_default!(bellatrix_fork_version); + check_default!(gossip_max_size); + check_default!(min_epochs_for_block_requests); + check_default!(max_chunk_size); + check_default!(ttfb_timeout); + check_default!(resp_timeout); + check_default!(message_domain_invalid_snappy); + check_default!(message_domain_valid_snappy); + check_default!(attestation_subnet_extra_bits); + check_default!(attestation_subnet_prefix_bits); + + assert_eq!(chain_spec.bellatrix_fork_epoch, None); } #[test] diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index eb25b57b0d7..415d6a14040 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -84,7 +84,7 @@ impl SubnetId { let subscription_duration = spec.epochs_per_subnet_subscription; let node_id_prefix = - (node_id >> (256 - spec.attestation_subnet_prefix_bits() as usize)).as_usize(); + (node_id >> (256 - spec.attestation_subnet_prefix_bits as usize)).as_usize(); // NOTE: The as_u64() panics if the number is larger than u64::max_value(). This cannot be // true as spec.epochs_per_subnet_subscription is a u64. @@ -99,7 +99,7 @@ impl SubnetId { let permutation_seed = ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); - let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); + let num_subnets = 1 << spec.attestation_subnet_prefix_bits; let permutated_prefix = compute_shuffled_index( node_id_prefix, num_subnets, diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 33aa8ad165d..b98145163c4 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -81,3 +81,17 @@ PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + +# Network +# --------------------------------------------------------------- +SUBNETS_PER_NODE: 2 +GOSSIP_MAX_SIZE: 10485760 +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +MAX_CHUNK_SIZE: 10485760 +TTFB_TIMEOUT: 5 +RESP_TIMEOUT: 10 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +ATTESTATION_SUBNET_PREFIX_BITS: 6 From 31daf3a87cab1e3b57a2e3a3fddeb3998467c306 Mon Sep 17 00:00:00 2001 From: Nico Flaig Date: Mon, 7 Aug 2023 00:46:29 +0000 Subject: [PATCH 15/20] Update doppelganger note about sync committee contributions (#4425) **Motivation** As clarified [on discord](https://discord.com/channels/605577013327167508/605577013331361793/1121246688183603240), sync committee contributions are not delayed if DP is enabled. **Description** This PR updates doppelganger note about sync committee contributions. Based on the current docs, a user might assume that DP is not working as expected. 
--- book/src/validator-doppelganger.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index 7ce2868e9b0..b62086d4bfb 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -46,6 +46,8 @@ Staying silent and refusing to sign messages will cause the following: - Potentially missed rewards by missing a block proposal (if the validator is an elected block proposer, which is unlikely). +Notably, sync committee contributions are not slashable and will continue to be produced even when DP is suppressing other messages. + The loss of rewards and penalties incurred due to the missed duties will be very small in dollar-values. Neglecting block proposals, generally they will equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than 1% of the reward for one validator for one day. Since DP costs so little but can protect a user from From 521432129df2489797b302bf16b8452a12bafcc2 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 7 Aug 2023 22:53:04 +0000 Subject: [PATCH 16/20] Support SSZ request body for POST /beacon/blinded_blocks endpoints (v1 & v2) (#4504) ## Issue Addressed #4262 ## Proposed Changes add SSZ support in request body for POST /beacon/blinded_blocks endpoints (v1 & v2) ## Additional Info --- beacon_node/http_api/src/lib.rs | 99 ++++++++++++++++++- .../tests/broadcast_validation_tests.rs | 43 ++++++++ beacon_node/http_api/tests/tests.rs | 77 +++++++++++++++ common/eth2/src/lib.rs | 37 +++++++ 4 files changed, 255 insertions(+), 1 deletion(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 7d1475809a7..739371c6ee9 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1391,6 +1391,46 @@ pub fn serve( }, ); + // POST beacon/blinded_blocks + let post_beacon_blinded_blocks_ssz = + eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block_bytes: Bytes, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + let block = + match SignedBeaconBlock::>::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(e) => { + return Err(warp_utils::reject::custom_bad_request(format!( + "{:?}", + e + ))) + } + }; + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }, + ); + let post_beacon_blinded_blocks_v2 = eth_v2 .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::query::()) .and(warp::path::end()) .and(warp::body::json()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( |validation_level: api_types::BroadcastValidationQuery, @@ -1428,6 +1468,58 @@ pub fn serve( }, ); + let post_beacon_blinded_blocks_v2_ssz = + eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + let block = + match SignedBeaconBlock::>::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) { + Ok(data) => data, + Err(_) => { + return warp::reply::with_status( + StatusCode::BAD_REQUEST, + eth2::StatusCode::BAD_REQUEST, + ) + .into_response(); + } + }; + match
publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }, + ); + let block_id_or_err = warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( "Invalid block ID".to_string(), @@ -4073,7 +4165,12 @@ pub fn serve( warp::post().and( warp::header::exact("Content-Type", "application/octet-stream") // Routes which expect `application/octet-stream` go within this `and`. - .and(post_beacon_blocks_ssz.uor(post_beacon_blocks_v2_ssz)) + .and( + post_beacon_blocks_ssz + .uor(post_beacon_blocks_v2_ssz) + .uor(post_beacon_blinded_blocks_ssz) + .uor(post_beacon_blinded_blocks_v2_ssz), + ) .uor(post_beacon_blocks) .uor(post_beacon_blinded_blocks) .uor(post_beacon_blocks_v2) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 457276d7023..00825890009 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -851,6 +851,49 @@ pub async fn blinded_gossip_full_pass() { .block_is_known_to_fork_choice(&block.canonical_root())); } +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_full_pass_ssz() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2_ssz(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_consensus_invalid() { diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 28eb106e8df..efdf66747dc 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2578,6 +2578,66 @@ impl ApiTester { } } + pub async fn test_blinded_block_production_ssz>(&self) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data; + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blinded_blocks_ssz(&signed_block) + .await + .unwrap(); + + // This converts the generic `Payload` to a concrete type for comparison. + let head_block = SignedBeaconBlock::from(signed_block.clone()); + assert_eq!(head_block, signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + pub async fn test_blinded_block_production_no_verify_randao>( self, ) -> Self { @@ -4704,6 +4764,14 @@ async fn blinded_block_production_full_payload_premerge() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_ssz_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_ssz::>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_with_skip_slots_full_payload_premerge() { ApiTester::new() @@ -4713,6 +4781,15 @@ async fn blinded_block_production_with_skip_slots_full_payload_premerge() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production_ssz::>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_no_verify_randao_full_payload_premerge() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 661f9a09eb3..146a832e388 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -742,6 +742,26 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/blinded_blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blinded_blocks_ssz>( + &self, + block: &SignedBeaconBlock, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("blinded_blocks"); + + self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal)) + .await?; + + Ok(()) + } + pub fn post_beacon_blocks_v2_path( &self, validation_level: Option, @@ -829,6 +849,23 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST v2/beacon/blinded_blocks` + pub async fn post_beacon_blinded_blocks_v2_ssz( + &self, + block: &SignedBlindedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version_and_ssz_body( + self.post_beacon_blinded_blocks_v2_path(validation_level)?, + block.as_ssz_bytes(), + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + /// Path for `v2/beacon/blocks` pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V2)?; From 5ea75052a87e994a0203070533b0d54c611978b1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 7 Aug 2023 22:53:05 +0000 Subject: [PATCH 17/20] Increase slashing protection test SQL timeout to 500ms (#4574) ## Issue Addressed NA ## Proposed Changes We've been seeing a lot of [CI failures](https://github.com/sigp/lighthouse/actions/runs/5781296217/job/15666209142) with errors like this: ``` ---- extra_interchange_tests::export_same_key_twice stdout ---- thread 'extra_interchange_tests::export_same_key_twice' panicked at 'called `Result::unwrap()` on an `Err` value: SQLError("Unable to open database: Error(None)")', validator_client/slashing_protection/src/extra_interchange_tests.rs:48:67 ``` I'm assuming they're timeouts. I noticed that tests have a 0.1s timeout. Perhaps this just doesn't cut it when our new runners are overloaded. ## Additional Info NA --- validator_client/slashing_protection/src/slashing_database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index c8be851472e..406913bfd10 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -23,7 +23,7 @@ pub const POOL_SIZE: u32 = 1; #[cfg(not(test))] pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); #[cfg(test)] -pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(100); +pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(500); /// Supported version of the interchange format. pub const SUPPORTED_INTERCHANGE_FORMAT_VERSION: u64 = 5; From 1373dcf076bd031e1c09119281c5f84b789974c2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 8 Aug 2023 00:03:22 +0000 Subject: [PATCH 18/20] Add `validator-manager` (#3502) ## Issue Addressed Addresses #2557 ## Proposed Changes Adds the `lighthouse validator-manager` command, which provides: - `lighthouse validator-manager create` - Creates a `validators.json` file and a `deposits.json` (same format as https://github.com/ethereum/staking-deposit-cli) - `lighthouse validator-manager import` - Imports validators from a `validators.json` file to the VC via the HTTP API. - `lighthouse validator-manager move` - Moves validators from one VC to the other, utilizing only the VC API. ## Additional Info In 98bcb947c I've reduced some VC `ERRO` and `CRIT` warnings to `WARN` or `DEBG` for the case where a pubkey is missing from the validator store. These were being triggered when we removed a validator but still had it in caches. 
It seems to me that `UnknownPubkey` will only happen in the case where we've removed a validator, so downgrading the logs is prudent. All the logs are `DEBG` apart from attestations and blocks which are `WARN`. I thought having *some* logging about this condition might help us down the track. In https://github.com/sigp/lighthouse/pull/3502/commits/856cd7e37db11e0318dff9db0979d67b79c2a633 I've made the VC delete the corresponding password file when it's deleting a keystore. This seemed like nice hygiene. Notably, it'll only delete that password file after it scans the validator definitions and finds that no other validator is also using that password file. --- Cargo.lock | 30 + Cargo.toml | 2 + account_manager/Cargo.toml | 2 + account_manager/src/common.rs | 50 +- account_manager/src/validator/import.rs | 8 +- account_manager/src/validator/recover.rs | 3 +- account_manager/src/wallet/recover.rs | 2 +- book/src/SUMMARY.md | 5 +- book/src/key-management.md | 27 +- book/src/validator-management.md | 4 + book/src/validator-manager-create.md | 206 +++ book/src/validator-manager-move.md | 188 +++ book/src/validator-manager.md | 35 + common/account_utils/src/lib.rs | 57 + .../src/validator_definitions.rs | 61 +- common/eth2/src/lighthouse_vc/http_client.rs | 15 + common/eth2/src/lighthouse_vc/std_types.rs | 5 +- common/eth2/src/lighthouse_vc/types.rs | 16 + common/validator_dir/Cargo.toml | 1 + common/validator_dir/src/builder.rs | 23 +- common/validator_dir/src/lib.rs | 4 +- common/validator_dir/src/validator_dir.rs | 6 +- consensus/types/src/lib.rs | 2 + consensus/types/src/withdrawal_credentials.rs | 57 + lighthouse/Cargo.toml | 2 + lighthouse/src/main.rs | 11 + lighthouse/tests/main.rs | 1 + lighthouse/tests/validator_client.rs | 26 + lighthouse/tests/validator_manager.rs | 344 +++++ validator_client/Cargo.toml | 2 - validator_client/src/attestation_service.rs | 30 +- validator_client/src/block_service.rs | 55 +- validator_client/src/cli.rs | 20 + validator_client/src/config.rs | 8 + validator_client/src/duties_service.rs | 14 + validator_client/src/duties_service/sync.rs | 13 + .../src/http_api/create_validator.rs | 37 +- validator_client/src/http_api/keystores.rs | 87 +- validator_client/src/http_api/mod.rs | 121 +- validator_client/src/http_api/test_utils.rs | 631 +++++++++ validator_client/src/http_api/tests.rs | 721 +++++----- .../src/http_api/tests/keystores.rs | 401 +++--- .../src/initialized_validators.rs | 160 ++- validator_client/src/key_cache.rs | 6 + validator_client/src/lib.rs | 10 +- validator_client/src/notifier.rs | 3 +- validator_client/src/preparation_service.rs | 19 +- .../src/sync_committee_service.rs | 29 +- validator_client/src/validator_store.rs | 6 +- validator_manager/Cargo.toml | 30 + validator_manager/src/common.rs | 361 +++++ validator_manager/src/create_validators.rs | 934 ++++++++++++ validator_manager/src/import_validators.rs | 436 ++++++ validator_manager/src/lib.rs | 85 ++ validator_manager/src/move_validators.rs | 1253 +++++++++++++++++ validator_manager/test_vectors/.gitignore | 1 + validator_manager/test_vectors/generate.py | 123 ++ .../deposit_data-1660803666.json | 1 + .../deposit_data-1660803669.json | 1 + .../deposit_data-1660803684.json | 1 + .../deposit_data-1660803679.json | 1 + .../deposit_data-1660803672.json | 1 + .../deposit_data-1660803675.json | 1 + .../deposit_data-1660803687.json | 1 + .../deposit_data-1660803690.json | 1 + .../deposit_data-1660803705.json | 1 + .../deposit_data-1660803701.json | 1 + .../deposit_data-1660803693.json | 1 + 
.../deposit_data-1660803696.json | 1 + 69 files changed, 6058 insertions(+), 743 deletions(-) create mode 100644 book/src/validator-manager-create.md create mode 100644 book/src/validator-manager-move.md create mode 100644 book/src/validator-manager.md create mode 100644 consensus/types/src/withdrawal_credentials.rs create mode 100644 lighthouse/tests/validator_manager.rs create mode 100644 validator_client/src/http_api/test_utils.rs create mode 100644 validator_manager/Cargo.toml create mode 100644 validator_manager/src/common.rs create mode 100644 validator_manager/src/create_validators.rs create mode 100644 validator_manager/src/import_validators.rs create mode 100644 validator_manager/src/lib.rs create mode 100644 validator_manager/src/move_validators.rs create mode 100644 validator_manager/test_vectors/.gitignore create mode 100644 validator_manager/test_vectors/generate.py create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json create mode 100644 validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json create mode 100644 validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json diff --git a/Cargo.lock b/Cargo.lock index 13f2b7cd43c..a24087c3a06 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,6 +30,8 @@ dependencies = [ "filesystem", "safe_arith", "sensitive_url", + "serde", + "serde_json", "slashing_protection", "slot_clock", "tempfile", @@ -4327,6 +4329,7 @@ dependencies = [ "env_logger 0.9.3", "environment", "eth1", + "eth2", "eth2_network_config", "ethereum_hashing", "futures", @@ -4349,6 +4352,7 @@ dependencies = [ "unused_port", "validator_client", "validator_dir", + "validator_manager", ] [[package]] @@ -8613,6 +8617,7 @@ dependencies = [ "bls", "deposit_contract", "derivative", + "directory", "eth2_keystore", "filesystem", "hex", @@ -8623,6 +8628,31 @@ dependencies = [ "types", ] +[[package]] +name = "validator_manager" +version = "0.1.0" +dependencies = [ + "account_utils", + "bls", + "clap", + "clap_utils", + "environment", + "eth2", + "eth2_keystore", + "eth2_network_config", + "eth2_wallet", + "ethereum_serde_utils", + "hex", + "regex", + "serde", + "serde_json", 
+ "tempfile", + "tokio", + "tree_hash", + "types", + "validator_client", +] + [[package]] name = "valuable" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index cb09d26a5dc..15906a03065 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,8 @@ members = [ "validator_client", "validator_client/slashing_protection", + "validator_manager", + "watch", ] resolver = "2" diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index ce863f91477..7d90cbb427d 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -24,6 +24,8 @@ safe_arith = {path = "../consensus/safe_arith"} slot_clock = { path = "../common/slot_clock" } filesystem = { path = "../common/filesystem" } sensitive_url = { path = "../common/sensitive_url" } +serde = { version = "1.0.116", features = ["derive"] } +serde_json = "1.0.58" [dev-dependencies] tempfile = "3.1.0" diff --git a/account_manager/src/common.rs b/account_manager/src/common.rs index ce42615e507..0764db21f37 100644 --- a/account_manager/src/common.rs +++ b/account_manager/src/common.rs @@ -1,55 +1,7 @@ -use account_utils::PlainText; -use account_utils::{read_input_from_user, strip_off_newlines}; -use eth2_wallet::bip39::{Language, Mnemonic}; -use std::fs; -use std::path::PathBuf; -use std::str::from_utf8; -use std::thread::sleep; -use std::time::Duration; +use account_utils::read_input_from_user; -pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:"; -pub fn read_mnemonic_from_cli( - mnemonic_path: Option, - stdin_inputs: bool, -) -> Result { - let mnemonic = match mnemonic_path { - Some(path) => fs::read(&path) - .map_err(|e| format!("Unable to read {:?}: {:?}", path, e)) - .and_then(|bytes| { - let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into(); - let phrase = from_utf8(bytes_no_newlines.as_ref()) - .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?; - Mnemonic::from_phrase(phrase, Language::English).map_err(|e| { - format!( - "Unable to derive mnemonic from string {:?}: {:?}", - phrase, e - ) - }) - })?, - None => loop { - eprintln!(); - eprintln!("{}", MNEMONIC_PROMPT); - - let mnemonic = read_input_from_user(stdin_inputs)?; - - match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) { - Ok(mnemonic_m) => { - eprintln!("Valid mnemonic provided."); - eprintln!(); - sleep(Duration::from_secs(1)); - break mnemonic_m; - } - Err(_) => { - eprintln!("Invalid mnemonic"); - } - } - }, - }; - Ok(mnemonic) -} - /// Reads in a wallet name from the user. If the `--wallet-name` flag is provided, use it. Otherwise /// read from an interactive prompt using tty unless the `--stdin-inputs` flag is provided. 
pub fn read_wallet_name_from_cli( diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 8dc50a9df1f..339d9a2914f 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -4,8 +4,8 @@ use account_utils::{ eth2_keystore::Keystore, read_password_from_user, validator_definitions::{ - recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions, - CONFIG_FILENAME, + recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition, + ValidatorDefinitions, CONFIG_FILENAME, }, ZeroizeString, }; @@ -277,7 +277,9 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let suggested_fee_recipient = None; let validator_def = ValidatorDefinition::new_keystore_with_password( &dest_keystore, - password_opt, + password_opt + .map(PasswordStorage::ValidatorDefinitions) + .unwrap_or(PasswordStorage::None), graffiti, suggested_fee_recipient, None, diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index d9b05e7756e..33d3b189266 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -1,10 +1,9 @@ use super::create::STORE_WITHDRAW_FLAG; -use crate::common::read_mnemonic_from_cli; use crate::validator::create::COUNT_FLAG; use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; -use account_utils::random_password; +use account_utils::{random_password, read_mnemonic_from_cli}; use clap::{App, Arg, ArgMatches}; use directory::ensure_dir_exists; use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index f107c3638cc..6e047aca8d2 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,6 +1,6 @@ -use crate::common::read_mnemonic_from_cli; use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; +use account_utils::read_mnemonic_from_cli; use clap::{App, Arg, ArgMatches}; use std::path::PathBuf; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7431d223871..507896f4311 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -12,6 +12,9 @@ * [Run a Node](./run_a_node.md) * [Become a Validator](./mainnet-validator.md) * [Validator Management](./validator-management.md) + * [The `validator-manager` Command](./validator-manager.md) + * [Creating validators](./validator-manager-create.md) + * [Moving validators](./validator-manager-move.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Partial Withdrawals](./partial-withdrawal.md) @@ -41,7 +44,7 @@ * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) * [Database Migrations](./database-migrations.md) - * [Key Management](./key-management.md) + * [Key Management (Deprecated)](./key-management.md) * [Key Recovery](./key-recovery.md) * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) diff --git a/book/src/key-management.md b/book/src/key-management.md index cebd84649da..b2bb7737fd4 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -1,9 +1,30 @@ -# Key Management +# Key Management (Deprecated) [launchpad]: 
https://launchpad.ethereum.org/ -> -**Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validators keys and the deposit data file. This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) has the option to assign a withdrawal address during the key generation process, while Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only `*.json` file. This means that users have to directly interact with the deposit contract to be able to submit the deposit if they were to generate the files using Lighthouse.** +**⚠️ The information on this page refers to tooling and processes that have been deprecated. Please read the "Deprecation Notice". ⚠️** + +## Deprecation Notice + +This page recommends the use of the `lighthouse account-manager` tool to create +validators. This tool will always generate keys with the withdrawal credentials +of type `0x00`. This means that users who created keys using `lighthouse +account-manager` will have to update their withdrawal credentials in a +separate step to receive staking rewards. + +In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, +which cannot be uploaded to the [Staking launchpad][launchpad], which accepts only +`*.json` files. This means that users have to directly interact with the deposit +contract to be able to submit the deposit if they were to generate the files +using Lighthouse. + +Rather than continuing to read this page, we recommend users visit either: + +- The [Staking Launchpad][launchpad] for detailed, beginner-friendly instructions. +- The [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) for a CLI tool used by the [Staking Launchpad][launchpad]. +- The [validator-manager documentation](./validator-manager.md) for a Lighthouse-specific tool for streamlined validator management. + +## The `lighthouse account-manager` Lighthouse uses a _hierarchical_ key management system for producing validator keys. It is hierarchical because each validator key can be _derived_ from a diff --git a/book/src/validator-management.md b/book/src/validator-management.md index be34fef2c3c..df7c2ac4760 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -13,6 +13,10 @@ standard directories and do not start their `lighthouse vc` with the this document. However, users with more complex needs may find this document useful. +The [lighthouse validator-manager](./validator-manager.md) command can be used +to create and import validators to a Lighthouse VC. It can also be used to move +validators between two Lighthouse VCs.
+ ## Introducing the `validator_definitions.yml` file The `validator_definitions.yml` file is located in the `validator-dir`, which diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md new file mode 100644 index 00000000000..779c159276e --- /dev/null +++ b/book/src/validator-manager-create.md @@ -0,0 +1,206 @@ +# Creating and Importing Validators + +[Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/ + +The `lighthouse validator-manager create` command derives validators from a +mnemonic and produces two files: + +- `validators.json`: the keystores and passwords for the newly generated + validators, in JSON format. +- `deposits.json`: a JSON file of the same format as + [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) which can + be used for deposit submission via the [Ethereum Staking + Launchpad][]. + +The `lighthouse validator-manager import` command accepts a `validators.json` +file (from the `create` command) and submits those validators to a running +Lighthouse Validator Client via the HTTP API. + +These two commands enable a workflow of: + +1. Creating the validators via the `create` command. +1. Importing the validators via the `import` command. +1. Depositing validators via the [Ethereum Staking + Launchpad][]. + +The separation of the `create` and `import` commands allows for running the +`create` command on an air-gapped host whilst performing the `import` command on +an internet-connected host. + +The `create` and `import` commands are recommended for advanced users who are +familiar with command line tools and the practicalities of managing sensitive +cryptographic material. **We recommend that novice users follow the workflow on +[Ethereum Staking Launchpad][] rather than using the `create` and `import` +commands.** + +## Simple Example + +Create validators from a mnemonic with: + +```bash +lighthouse \ + validator-manager \ + create \ + --network mainnet \ + --first-index 0 \ + --count 2 \ + --eth1-withdrawal-address
<ETH1_ADDRESS>
\ + --suggested-fee-recipient <ETH1_ADDRESS>
\ + --output-path ./ +``` +> If the flag `--first-index` is not provided, it will default to using index 0. +> The `--suggested-fee-recipient` flag may be omitted to use whatever default +> value the VC uses. It does not necessarily need to be identical to +> `--eth1-withdrawal-address`. +> The command will create the `deposits.json` and `validators.json` in the present working directory. If you would like these files to be created in a different directory, change the value of `output-path`, for example `--output-path /desired/directory`. The directory will be created if the path does not exist. + +Then, import the validators to a running VC with: + +```bash +lighthouse \ + validator-manager \ + import \ + --validators-file validators.json \ + --vc-token <API-TOKEN-PATH> +``` +> This is assuming that `validators.json` is in the present working directory. If it is not, provide the path to the file. +> Be sure to remove `./validators.json` after the import is successful since it +> contains unencrypted validator keystores. + +## Detailed Guide + +This guide will create two validators and import them to a VC. For simplicity, +the same host will be used to generate the keys and run the VC. In reality, +users may want to perform the `create` command on an air-gapped machine and then +move the `validators.json` and `deposits.json` files to an Internet-connected +host. This would help protect the mnemonic from being exposed to the Internet. + +### 1. Create the Validators + +Run the `create` command, substituting `<ETH1_ADDRESS>
` for an execution address that +you control. This is where all the staked ETH and rewards will ultimately +reside, so it's very important that this address is secure, accessible and +backed-up. The `create` command: + +```bash +lighthouse \ + validator-manager \ + create \ + --first-index 0 \ + --count 2 \ + --eth1-withdrawal-address <ETH1_ADDRESS>
\ + --output-path ./ +``` + +If successful, the command output will appear as below: + +```bash +Running validator manager for mainnet network + +Enter the mnemonic phrase: + +Valid mnemonic provided. + +Starting derivation of 2 keystores. Each keystore may take several seconds. +Completed 1/2: 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4 +Completed 2/2: 0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f +Keystore generation complete +Writing "./validators.json" +Writing "./deposits.json" +``` + +This command will create validators at indices `0, 1`. The exact indices created +can be influenced with the `--first-index` and `--count` flags. Use these flags +with caution to prevent creating the same validator twice; this may result in a +slashing! + +The command will create two files: + +- `./deposits.json`: this file does *not* contain sensitive information and may be uploaded to the [Ethereum Staking Launchpad]. +- `./validators.json`: this file contains **sensitive unencrypted validator keys, do not share it with anyone or upload it to any website**. + +### 2. Import the validators + +The VC which will receive the validators needs to have the following flags at a minimum: + +- `--http` +- `--http-port 5062` +- `--enable-doppelganger-protection` + +Therefore, the VC command might look like: + +```bash +lighthouse \ + vc \ + --http \ + --http-port 5062 \ + --enable-doppelganger-protection +``` + +In order to import the validators, the location of the VC `api-token.txt` file +must be known. The location of the file varies, but it is located in the +"validator directory" of your data directory. For example: +`~/.lighthouse/mainnet/validators/api-token.txt`. We will use `<API-TOKEN-PATH>` +to substitute this value. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path. + + +Once the VC is running, use the `import` command to import the validators to the VC: + +```bash +lighthouse \ + validator-manager \ + import \ + --validators-file validators.json \ + --vc-token <API-TOKEN-PATH> +``` + +If successful, the command output will appear as below: + +```bash +Running validator manager for mainnet network +Validator client is reachable at http://localhost:5062/ and reports 0 validators +Starting to submit 2 validators to VC, each validator may take several seconds +Uploaded keystore 1 of 2 to the VC +Uploaded keystore 2 of 2 to the VC +``` + +The user should now *securely* delete the `validators.json` file (e.g., `shred -u validators.json`). +The `validators.json` contains the unencrypted validator keys and must not be +shared with anyone. +At the same time, `lighthouse vc` will log: +```bash +INFO Importing keystores via standard HTTP API, count: 1 +WARN No slashing protection data provided with keystores +INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore +INFO Modified key_cache saved successfully +``` +The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk.
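As a quick sanity check after the import, the standard [keymanager API](https://ethereum.github.io/keymanager-APIs/) exposed by the VC can list the keystores it now manages. A minimal sketch, assuming the VC listens on the default `localhost:5062` and `<API-TOKEN>` stands for the *contents* of `api-token.txt` (not its path):

```bash
# List the keystores currently known to the VC via the standard keymanager API.
# <API-TOKEN> is the contents of api-token.txt; adjust the host/port if the VC
# was started with a non-default --http-port.
curl -s \
  -H "Authorization: Bearer <API-TOKEN>" \
  http://localhost:5062/eth/v1/keystores
```

Each imported validator's public key should appear in the returned `data` array.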
+The validators will now go through 2-3 epochs of [doppelganger +protection](./validator-doppelganger.md) and will automatically start performing +their duties when they are deposited and activated. + +If the host VC contains the same public key as the `validators.json` file, an error will be shown and the `import` process will stop: + +```bash +Duplicate validator 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f already exists on the destination validator client. This may indicate that some validators are running in two places at once, which can lead to slashing. If you are certain that there is no risk, add the --ignore-duplicates flag. +Err(DuplicateValidator(0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f)) +``` + +If you are certain that it is safe, you can add the flag `--ignore-duplicates` in the `import` command. The command becomes: + +```bash +lighthouse \ + validator-manager \ + import \ + --validators-file validators.json \ + --vc-token <API-TOKEN-PATH> \ + --ignore-duplicates +``` +and the output will be as follows: + +```bash +Duplicate validators are ignored, ignoring 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f which exists on the destination validator client +Re-uploaded keystore 1 of 6 to the VC +``` + +The guide is complete. \ No newline at end of file diff --git a/book/src/validator-manager-move.md b/book/src/validator-manager-move.md new file mode 100644 index 00000000000..98932604d5d --- /dev/null +++ b/book/src/validator-manager-move.md @@ -0,0 +1,188 @@ +# Moving Validators + +The `lighthouse validator-manager move` command uses the VC HTTP API to move +validators from one VC (the "src" VC) to another VC (the "dest" VC). The move +operation is *comprehensive*; it will: + +- Disable the validators on the src VC. +- Remove the validator keystores from the src VC file system. +- Export the slashing database records for the appropriate validators from the src VC to the dest VC. +- Enable the validators on the dest VC. +- Generally result in very little or no validator downtime. + +It is capable of moving all validators on the src VC, a count of validators or +a list of pubkeys. + +The `move` command is only guaranteed to work between two Lighthouse VCs (i.e., +there is no guarantee that the commands will work between Lighthouse and Teku, for instance). + +The `move` command only supports moving validators using a keystore on the local +file system; it does not support `Web3Signer` validators. + +Although all efforts are taken to avoid it, it's possible for the `move` command +to fail in a way that removes the validator from the src VC without adding it to the +dest VC. Therefore, it is recommended to **never use the `move` command without +having a backup of all validator keystores (e.g. the mnemonic).** + +## Simple Example + +The following command will move all validators from the VC running at +`http://localhost:6062` to the VC running at `http://localhost:5062`. + +```bash +lighthouse \ + validator-manager \ + move \ + --src-vc-url http://localhost:6062 \ + --src-vc-token ~/src-token.txt \ + --dest-vc-url http://localhost:5062 \ + --dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \ + --validators all +``` + +## Detailed Guide + +This guide describes the steps to move validators between two validator clients (VCs) which are +able to SSH between each other.
+This guide assumes experience with the Linux command line and SSH
+connections.
+
+There will be two VCs in this example:
+
+- The *source* VC, which contains the validators/keystores to be moved.
+- The *destination* VC, which is to take the validators/keystores from the source.
+
+There will be two hosts in this example:
+
+- Host 1 (the *"source host"*): runs the `src-vc`.
+- Host 2 (the *"destination host"*): runs the `dest-vc`.
+
+The example assumes that Host 1 is able to SSH to Host 2.
+
+In reality, many host configurations are possible. For example:
+
+- Both VCs on the same host.
+- Both VCs on different hosts, with the `validator-manager` being used on a third host.
+
+### 1. Configure the Source VC
+
+The source VC needs to have the following flags at a minimum:
+
+- `--http`
+- `--http-port 5062`
+- `--http-allow-keystore-export`
+
+Therefore, the source VC command might look like:
+
+```bash
+lighthouse \
+    vc \
+    --http \
+    --http-port 5062 \
+    --http-allow-keystore-export
+```
+
+### 2. Configure the Destination VC
+
+The destination VC needs to have the following flags at a minimum:
+
+- `--http`
+- `--http-port 5062`
+- `--enable-doppelganger-protection`
+
+Therefore, the destination VC command might look like:
+
+```bash
+lighthouse \
+    vc \
+    --http \
+    --http-port 5062 \
+    --enable-doppelganger-protection
+```
+
+> The `--enable-doppelganger-protection` flag is not *strictly* required, however
+> it is recommended for an additional layer of safety. It will result in 2-3
+> epochs of downtime for the validator after it is moved, which is generally an
+> inconsequential cost in lost rewards or penalties.
+>
+> Optionally, users can add the `--http-store-passwords-in-secrets-dir` flag if they'd like to have
+> the imported validator keystore passwords stored in separate files rather than in the
+> `validator_definitions.yml` file. If you don't know what this means, you can safely omit the flag.
+
+### 3. Obtain the Source API Token
+
+The VC API is protected by an *API token*. This is stored in a file on each of the hosts. Since
+we'll be running our command on the destination host, it will need to have the API token for the
+source host on its file system.
+
+On the **source host**, find the location of the `api-token.txt` file and copy its contents. The
+location of the file varies, but it is located in the "validator directory" of your data directory,
+alongside validator keystores. For example: `~/.lighthouse/mainnet/validators/api-token.txt`. If you
+are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth`,
+which will show the path.
+
+Copy the contents of that file into a new file on the **destination host** at `~/src-token.txt`. The
+API token should be similar to `api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123`.
+
+### 4. Create an SSH Tunnel
+
+On the **source host**, open a terminal window, SSH to the **destination host**, and from the
+destination host establish an SSH connection back to the **source host** that forwards port `6062`
+on the destination host to port `5062` (the VC API port) on the source host:
+
+```bash
+ssh dest-host
+ssh -L 6062:localhost:5062 src-host
+```
+
+It's important that you leave this session open throughout the rest of this tutorial. If you close
+this terminal window, the connection between the destination and source hosts will be lost.
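+
+Before moving on, you can optionally verify that the tunnel works by querying the source VC
+through the forwarded port from another terminal on the **destination host**. This is a sketch
+that assumes the source API token was copied to `~/src-token.txt` as described above:
+
+```bash
+# The source VC's API (port 5062 on the source host) is now reachable
+# at localhost:6062 on the destination host.
+curl -H "Authorization: Bearer $(cat ~/src-token.txt)" \
+    http://localhost:6062/lighthouse/version
+```
+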
+### 5. Move
+
+With the SSH tunnel established between the `dest-host` and `src-host`, run the following command
+from the **destination host** to move the validators:
+
+```bash
+lighthouse \
+    validator-manager \
+    move \
+    --src-vc-url http://localhost:6062 \
+    --src-vc-token ~/src-token.txt \
+    --dest-vc-url http://localhost:5062 \
+    --dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \
+    --validators all
+```
+
+The command will provide information about the progress of the operation and
+emit `Done.` when the operation has completed successfully. For example:
+
+```bash
+Running validator manager for mainnet network
+Validator client is reachable at http://localhost:5062/ and reports 2 validators
+Validator client is reachable at http://localhost:6062/ and reports 0 validators
+Moved keystore 1 of 2
+Moved keystore 2 of 2
+Done.
+```
+
+At the same time, `lighthouse vc` will log:
+```bash
+INFO Importing keystores via standard HTTP API, count: 1
+INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore
+INFO Modified key_cache saved successfully
+```
+
+Once the operation completes successfully, there is nothing else to be done. The
+validators have been removed from the `src-host` and enabled on the `dest-host`.
+If the `--enable-doppelganger-protection` flag was used, it may take 2-3 epochs
+for the validators to start attesting and producing blocks on the `dest-host`.
+
+If you would like to move only some validators, you can replace the `--validators all` flag with one
+or more comma-separated validator public keys. For example:
+
+```bash
+lighthouse \
+    validator-manager \
+    move \
+    --src-vc-url http://localhost:6062 \
+    --src-vc-token ~/src-token.txt \
+    --dest-vc-url http://localhost:5062 \
+    --dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \
+    --validators 0x9096aab771e44da149bd7c9926d6f7bb96ef465c0eeb4918be5178cd23a1deb4aec232c61d85ff329b54ed4a3bdfff3a,0x90fc4f72d898a8f01ab71242e36f4545aaf87e3887be81632bb8ba4b2ae8fb70753a62f866344d7905e9a07f5a9cdda1
+```
+
+Any errors encountered during the operation should include information on how to
+proceed. Assistance is also available on our
+[Discord](https://discord.gg/cyAszAh).
\ No newline at end of file
diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md
new file mode 100644
index 00000000000..e3cb74bd668
--- /dev/null
+++ b/book/src/validator-manager.md
@@ -0,0 +1,35 @@
+# Validator Manager
+
+[Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/
+[Import Validators]: #import-validators
+
+## Introduction
+
+The `lighthouse validator-manager` tool provides utilities for managing validators on a *running*
+Lighthouse Validator Client. The validator manager performs operations via the HTTP API of the
+validator client (VC). Due to limitations of the
+[keymanager-APIs](https://ethereum.github.io/keymanager-APIs/), only Lighthouse VCs are fully
+supported by this command.
+
+The validator manager tool is similar to the `lighthouse account-manager` tool, except that the
+`account-manager` creates files that will be read by the VC the next time it starts, whilst the
+`validator-manager` makes instant changes to a live VC.
+
+The `account-manager` is ideal for importing keys created with the
+[staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli). On the
+other hand, the `validator-manager` is ideal for moving existing validators
+between two VCs, or for advanced users to create validators at scale with less
+downtime.
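+
+All of the subcommands follow the same general invocation pattern. As a sketch (each
+subcommand and its flags are covered in the guides linked below):
+
+```bash
+lighthouse validator-manager <create|import|move> [FLAGS]
+
+# For example, print the flags accepted by the `move` subcommand:
+lighthouse validator-manager move --help
+```
+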
+The `validator-manager` boasts the following features:
+
+- One-line command to arbitrarily move validators between two VCs, maintaining the slashing protection database.
+- Generates deposit files compatible with the [Ethereum Staking Launchpad][].
+- Generally involves zero or very little downtime.
+- The "key cache" is preserved whenever a validator is added with the validator
+  manager, preventing long waits at startup.
+
+## Guides
+
+- [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md)
+- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md)
\ No newline at end of file
diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs
index 89de3803856..e566d7cdda3 100644
--- a/common/account_utils/src/lib.rs
+++ b/common/account_utils/src/lib.rs
@@ -13,6 +13,9 @@ use std::fs::{self, File};
 use std::io;
 use std::io::prelude::*;
 use std::path::{Path, PathBuf};
+use std::str::from_utf8;
+use std::thread::sleep;
+use std::time::Duration;
 use zeroize::Zeroize;
 
 pub mod validator_definitions;
@@ -30,6 +33,8 @@ pub const MINIMUM_PASSWORD_LEN: usize = 12;
 /// array of length 32.
 const DEFAULT_PASSWORD_LEN: usize = 48;
 
+pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:";
+
 /// Returns the "default" path where a wallet should store its password file.
 pub fn default_wallet_password_path<P: AsRef<Path>>(wallet_name: &str, secrets_dir: P) -> PathBuf {
     secrets_dir.as_ref().join(format!("{}.pass", wallet_name))
@@ -59,6 +64,18 @@ pub fn read_password<P: AsRef<Path>>(path: P) -> Result<PlainText, io::Error> {
     fs::read(path).map(strip_off_newlines).map(Into::into)
 }
 
+/// Reads a password file into a `ZeroizeString` struct, with new-lines removed.
+pub fn read_password_string<P: AsRef<Path>>(path: P) -> Result<ZeroizeString, String> {
+    fs::read(path)
+        .map_err(|e| format!("Error opening file: {:?}", e))
+        .map(strip_off_newlines)
+        .and_then(|bytes| {
+            String::from_utf8(bytes)
+                .map_err(|e| format!("Error decoding utf8: {:?}", e))
+                .map(Into::into)
+        })
+}
+
 /// Write a file atomically by using a temporary file as an intermediate.
 ///
 /// Care is taken to preserve the permissions of the file at `file_path` being written.
@@ -220,6 +237,46 @@ impl AsRef<[u8]> for ZeroizeString {
     }
 }
 
+pub fn read_mnemonic_from_cli(
+    mnemonic_path: Option<PathBuf>,
+    stdin_inputs: bool,
+) -> Result<Mnemonic, String> {
+    let mnemonic = match mnemonic_path {
+        Some(path) => fs::read(&path)
+            .map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
+            .and_then(|bytes| {
+                let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into();
+                let phrase = from_utf8(bytes_no_newlines.as_ref())
+                    .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?;
+                Mnemonic::from_phrase(phrase, Language::English).map_err(|e| {
+                    format!(
+                        "Unable to derive mnemonic from string {:?}: {:?}",
+                        phrase, e
+                    )
+                })
+            })?,
+        None => loop {
+            eprintln!();
+            eprintln!("{}", MNEMONIC_PROMPT);
+
+            let mnemonic = read_input_from_user(stdin_inputs)?;
+
+            match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) {
+                Ok(mnemonic_m) => {
+                    eprintln!("Valid mnemonic provided.");
+                    eprintln!();
+                    sleep(Duration::from_secs(1));
+                    break mnemonic_m;
+                }
+                Err(_) => {
+                    eprintln!("Invalid mnemonic");
+                }
+            }
+        },
+    };
+    Ok(mnemonic)
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs
index 6ce2517fb2b..c91e717d11b 100644
--- a/common/account_utils/src/validator_definitions.rs
+++ b/common/account_utils/src/validator_definitions.rs
@@ -3,7 +3,9 @@
 //! Serves as the source-of-truth of which validators this validator client should attempt (or not
 //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct.
 
-use crate::{default_keystore_password_path, write_file_via_temporary, ZeroizeString};
+use crate::{
+    default_keystore_password_path, read_password_string, write_file_via_temporary, ZeroizeString,
+};
 use directory::ensure_dir_exists;
 use eth2_keystore::Keystore;
 use regex::Regex;
@@ -43,6 +45,18 @@ pub enum Error {
     UnableToOpenKeystore(eth2_keystore::Error),
     /// The validator directory could not be created.
     UnableToCreateValidatorDir(PathBuf),
+    UnableToReadKeystorePassword(String),
+    KeystoreWithoutPassword,
+}
+
+/// Defines how a password for a validator keystore will be persisted.
+pub enum PasswordStorage {
+    /// Store the password in the `validator_definitions.yml` file.
+    ValidatorDefinitions(ZeroizeString),
+    /// Store the password in a separate, dedicated file (likely in the "secrets" directory).
+    File(PathBuf),
+    /// Don't store the password at all.
+    None,
 }
 
 #[derive(Clone, PartialEq, Serialize, Deserialize, Hash, Eq)]
@@ -92,6 +106,34 @@ impl SigningDefinition {
     pub fn is_local_keystore(&self) -> bool {
         matches!(self, SigningDefinition::LocalKeystore { .. })
     }
+
+    pub fn voting_keystore_password(&self) -> Result<Option<ZeroizeString>, Error> {
+        match self {
+            SigningDefinition::LocalKeystore {
+                voting_keystore_password: Some(password),
+                ..
+            } => Ok(Some(password.clone())),
+            SigningDefinition::LocalKeystore {
+                voting_keystore_password_path: Some(path),
+                ..
+            } => read_password_string(path)
+                .map(Into::into)
+                .map(Option::Some)
+                .map_err(Error::UnableToReadKeystorePassword),
+            SigningDefinition::LocalKeystore { .. } => Err(Error::KeystoreWithoutPassword),
+            SigningDefinition::Web3Signer(_) => Ok(None),
+        }
+    }
+
+    pub fn voting_keystore_password_path(&self) -> Option<&PathBuf> {
+        match self {
+            SigningDefinition::LocalKeystore {
+                voting_keystore_password_path: Some(path),
+                ..
+            } => Some(path),
+            _ => None,
+        }
+    }
 }
 
 /// A validator that may be initialized by this validator client.
@@ -129,7 +171,7 @@ impl ValidatorDefinition {
     /// This function does not check the password against the keystore.
     pub fn new_keystore_with_password<P: AsRef<Path>>(
         voting_keystore_path: P,
-        voting_keystore_password: Option<ZeroizeString>,
+        voting_keystore_password_storage: PasswordStorage,
         graffiti: Option<GraffitiString>,
         suggested_fee_recipient: Option<Address>,
        gas_limit: Option<u64>,
@@ -139,6 +181,12 @@
         let keystore =
             Keystore::from_json_file(&voting_keystore_path).map_err(Error::UnableToOpenKeystore)?;
         let voting_public_key = keystore.public_key().ok_or(Error::InvalidKeystorePubkey)?;
+        let (voting_keystore_password_path, voting_keystore_password) =
+            match voting_keystore_password_storage {
+                PasswordStorage::ValidatorDefinitions(password) => (None, Some(password)),
+                PasswordStorage::File(path) => (Some(path), None),
+                PasswordStorage::None => (None, None),
+            };
 
         Ok(ValidatorDefinition {
             enabled: true,
@@ -150,7 +198,7 @@
             builder_proposals,
             signing_definition: SigningDefinition::LocalKeystore {
                 voting_keystore_path,
-                voting_keystore_password_path: None,
+                voting_keystore_password_path,
                 voting_keystore_password,
             },
         })
@@ -346,6 +394,13 @@ impl ValidatorDefinitions {
     pub fn as_mut_slice(&mut self) -> &mut [ValidatorDefinition] {
         self.0.as_mut_slice()
     }
+
+    /// Returns an iterator over all the `voting_keystore_password_paths` in self.
+    pub fn iter_voting_keystore_password_paths(&self) -> impl Iterator<Item = &PathBuf> {
+        self.0
+            .iter()
+            .filter_map(|def| def.signing_definition.voting_keystore_password_path())
+    }
 }
 
 /// Perform an exhaustive tree search of `dir`, adding any discovered voting keystore paths to
diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs
index cd7873c9b63..7bf4cf5b19a 100644
--- a/common/eth2/src/lighthouse_vc/http_client.rs
+++ b/common/eth2/src/lighthouse_vc/http_client.rs
@@ -490,6 +490,21 @@ impl ValidatorClientHttpClient {
             .await
     }
 
+    /// `DELETE lighthouse/keystores`
+    pub async fn delete_lighthouse_keystores(
+        &self,
+        req: &DeleteKeystoresRequest,
+    ) -> Result<ExportKeystoresResponse, Error> {
+        let mut path = self.server.full.clone();
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+ .push("lighthouse") + .push("keystores"); + + self.delete_with_unsigned_response(path, req).await + } + fn make_keystores_url(&self) -> Result { let mut url = self.server.full.clone(); url.path_segments_mut() diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 0d67df47a9a..33e2f764efe 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,9 +1,10 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -use slashing_protection::interchange::Interchange; use types::{Address, PublicKeyBytes}; +pub use slashing_protection::interchange::Interchange; + #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct GetFeeRecipientResponse { pub pubkey: PublicKeyBytes, @@ -27,7 +28,7 @@ pub struct ListKeystoresResponse { pub data: Vec, } -#[derive(Debug, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash)] pub struct SingleKeystoreResponse { pub validating_pubkey: PublicKeyBytes, pub derivation_path: Option, diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 7bbe041dbdb..f1a91b4ef1e 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -152,3 +152,19 @@ pub struct UpdateGasLimitRequest { pub struct VoluntaryExitQuery { pub epoch: Option, } + +#[derive(Deserialize, Serialize)] +pub struct ExportKeystoresResponse { + pub data: Vec, + #[serde(with = "serde_utils::json_str")] + pub slashing_protection: Interchange, +} + +#[derive(Deserialize, Serialize)] +pub struct SingleExportKeystoresResponse { + pub status: Status, + #[serde(skip_serializing_if = "Option::is_none")] + pub validating_keystore: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub validating_keystore_password: Option, +} diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 39a14e28377..8accddfcb9b 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -20,6 +20,7 @@ tree_hash = "0.5.0" hex = "0.4.2" derivative = "2.1.1" lockfile = { path = "../lockfile" } +directory = { path = "../directory" } [dev-dependencies] tempfile = "3.1.0" diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 2b3f670c70f..bccf9086acc 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -1,6 +1,7 @@ use crate::{Error as DirError, ValidatorDir}; use bls::get_withdrawal_credentials; use deposit_contract::{encode_eth1_tx_data, Error as DepositError}; +use directory::ensure_dir_exists; use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText}; use filesystem::create_with_600_perms; use rand::{distributions::Alphanumeric, Rng}; @@ -41,6 +42,7 @@ pub enum Error { #[cfg(feature = "insecure_keys")] InsecureKeysError(String), MissingPasswordDir, + UnableToCreatePasswordDir(String), } impl From for Error { @@ -78,6 +80,13 @@ impl<'a> Builder<'a> { self } + /// Optionally supply a directory in which to store the passwords for the validator keystores. + /// If `None` is provided, do not store the password. + pub fn password_dir_opt(mut self, password_dir_opt: Option) -> Self { + self.password_dir = password_dir_opt; + self + } + /// Build the `ValidatorDir` use the given `keystore` which can be unlocked with `password`. 
/// /// The builder will not necessarily check that `password` can unlock `keystore`. @@ -153,6 +162,10 @@ impl<'a> Builder<'a> { create_dir_all(&dir).map_err(Error::UnableToCreateDir)?; } + if let Some(password_dir) = &self.password_dir { + ensure_dir_exists(password_dir).map_err(Error::UnableToCreatePasswordDir)?; + } + // The withdrawal keystore must be initialized in order to store it or create an eth1 // deposit. if (self.store_withdrawal_keystore || self.deposit_info.is_some()) @@ -234,7 +247,7 @@ impl<'a> Builder<'a> { if self.store_withdrawal_keystore { // Write the withdrawal password to file. write_password_to_file( - password_dir.join(withdrawal_keypair.pk.as_hex_string()), + keystore_password_path(password_dir, &withdrawal_keystore), withdrawal_password.as_bytes(), )?; @@ -250,7 +263,7 @@ impl<'a> Builder<'a> { if let Some(password_dir) = self.password_dir.as_ref() { // Write the voting password to file. write_password_to_file( - password_dir.join(format!("0x{}", voting_keystore.pubkey())), + keystore_password_path(password_dir, &voting_keystore), voting_password.as_bytes(), )?; } @@ -262,6 +275,12 @@ impl<'a> Builder<'a> { } } +pub fn keystore_password_path>(password_dir: P, keystore: &Keystore) -> PathBuf { + password_dir + .as_ref() + .join(format!("0x{}", keystore.pubkey())) +} + /// Writes a JSON keystore to file. fn write_keystore_to_file(path: PathBuf, keystore: &Keystore) -> Result<(), Error> { if path.exists() { diff --git a/common/validator_dir/src/lib.rs b/common/validator_dir/src/lib.rs index a39d322834b..4aa0d590a16 100644 --- a/common/validator_dir/src/lib.rs +++ b/common/validator_dir/src/lib.rs @@ -15,6 +15,6 @@ pub use crate::validator_dir::{ ETH1_DEPOSIT_TX_HASH_FILE, }; pub use builder::{ - Builder, Error as BuilderError, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, - WITHDRAWAL_KEYSTORE_FILE, + keystore_password_path, Builder, Error as BuilderError, ETH1_DEPOSIT_DATA_FILE, + VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, }; diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index cb1ddde24a4..24b317dcfe3 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -1,5 +1,5 @@ use crate::builder::{ - ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, + keystore_password_path, ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, }; use deposit_contract::decode_eth1_tx_data; @@ -219,9 +219,7 @@ pub fn unlock_keypair>( ) .map_err(Error::UnableToReadKeystore)?; - let password_path = password_dir - .as_ref() - .join(format!("0x{}", keystore.pubkey())); + let password_path = keystore_password_path(password_dir, &keystore); let password: PlainText = read(&password_path) .map_err(|_| Error::UnableToReadPassword(password_path))? 
.into(); diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 874d7cd2bdf..85ce351766f 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -71,6 +71,7 @@ pub mod sync_duty; pub mod validator; pub mod validator_subscription; pub mod voluntary_exit; +pub mod withdrawal_credentials; #[macro_use] pub mod slot_epoch_macros; pub mod config_and_preset; @@ -189,6 +190,7 @@ pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; pub use crate::withdrawal::Withdrawal; +pub use crate::withdrawal_credentials::WithdrawalCredentials; pub type CommitteeIndex = u64; pub type Hash256 = H256; diff --git a/consensus/types/src/withdrawal_credentials.rs b/consensus/types/src/withdrawal_credentials.rs new file mode 100644 index 00000000000..8d42d4eafd4 --- /dev/null +++ b/consensus/types/src/withdrawal_credentials.rs @@ -0,0 +1,57 @@ +use crate::*; +use bls::get_withdrawal_credentials; + +pub struct WithdrawalCredentials(Hash256); + +impl WithdrawalCredentials { + pub fn bls(withdrawal_public_key: &PublicKey, spec: &ChainSpec) -> Self { + let withdrawal_credentials = + get_withdrawal_credentials(withdrawal_public_key, spec.bls_withdrawal_prefix_byte); + Self(Hash256::from_slice(&withdrawal_credentials)) + } + + pub fn eth1(withdrawal_address: Address, spec: &ChainSpec) -> Self { + let mut withdrawal_credentials = [0; 32]; + withdrawal_credentials[0] = spec.eth1_address_withdrawal_prefix_byte; + withdrawal_credentials[12..].copy_from_slice(withdrawal_address.as_bytes()); + Self(Hash256::from_slice(&withdrawal_credentials)) + } +} + +impl From for Hash256 { + fn from(withdrawal_credentials: WithdrawalCredentials) -> Self { + withdrawal_credentials.0 + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::test_utils::generate_deterministic_keypair; + use std::str::FromStr; + + #[test] + fn bls_withdrawal_credentials() { + let spec = &MainnetEthSpec::default_spec(); + let keypair = generate_deterministic_keypair(0); + let credentials = WithdrawalCredentials::bls(&keypair.pk, spec); + let manually_generated_credentials = + get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte); + let hash: Hash256 = credentials.into(); + assert_eq!(hash[0], spec.bls_withdrawal_prefix_byte); + assert_eq!(hash.as_bytes(), &manually_generated_credentials); + } + + #[test] + fn eth1_withdrawal_credentials() { + let spec = &MainnetEthSpec::default_spec(); + let address = Address::from_str("0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b").unwrap(); + let credentials = WithdrawalCredentials::eth1(address, spec); + let hash: Hash256 = credentials.into(); + assert_eq!( + hash, + Hash256::from_str("0x01000000000000000000000025c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b") + .unwrap() + ) + } +} diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index e7746a2db98..169aa67fdde 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -56,6 +56,7 @@ directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } database_manager = { path = "../database_manager" } slasher = { path = "../slasher" } +validator_manager = { path = "../validator_manager" } [dev-dependencies] tempfile = "3.1.0" @@ -64,6 +65,7 @@ slashing_protection = { path = "../validator_client/slashing_protection" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } sensitive_url = { path = "../common/sensitive_url" } eth1 = { path = "../beacon_node/eth1" } +eth2 
= { path = "../common/eth2" } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index b814639ceb0..73e042342af 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -329,6 +329,7 @@ fn main() { .subcommand(validator_client::cli_app()) .subcommand(account_manager::cli_app()) .subcommand(database_manager::cli_app()) + .subcommand(validator_manager::cli_app()) .get_matches(); // Configure the allocator early in the process, before it has the chance to use the default values for @@ -567,6 +568,16 @@ fn run( return Ok(()); } + if let Some(sub_matches) = matches.subcommand_matches(validator_manager::CMD) { + eprintln!("Running validator manager for {} network", network_name); + + // Pass the entire `environment` to the account manager so it can run blocking operations. + validator_manager::run::(sub_matches, environment)?; + + // Exit as soon as account manager returns control. + return Ok(()); + } + if let Some(sub_matches) = matches.subcommand_matches(database_manager::CMD) { info!(log, "Running database manager for {} network", network_name); // Pass the entire `environment` to the database manager so it can run blocking operations. diff --git a/lighthouse/tests/main.rs b/lighthouse/tests/main.rs index 806524cab05..bf587f79df7 100644 --- a/lighthouse/tests/main.rs +++ b/lighthouse/tests/main.rs @@ -5,3 +5,4 @@ mod beacon_node; mod boot_node; mod exec; mod validator_client; +mod validator_manager; diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 27d7c10e96c..9bcfe2a1d50 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -309,6 +309,32 @@ fn http_allow_origin_all_flag() { .run() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); } +#[test] +fn http_allow_keystore_export_default() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.http_api.allow_keystore_export)); +} +#[test] +fn http_allow_keystore_export_present() { + CommandLineTest::new() + .flag("http-allow-keystore-export", None) + .run() + .with_config(|config| assert!(config.http_api.allow_keystore_export)); +} +#[test] +fn http_store_keystore_passwords_in_secrets_dir_default() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.http_api.store_passwords_in_secrets_dir)); +} +#[test] +fn http_store_keystore_passwords_in_secrets_dir_present() { + CommandLineTest::new() + .flag("http-store-passwords-in-secrets-dir", None) + .run() + .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); +} // Tests for Metrics flags. 
#[test] diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs new file mode 100644 index 00000000000..e0a1e92d6ae --- /dev/null +++ b/lighthouse/tests/validator_manager.rs @@ -0,0 +1,344 @@ +use eth2::SensitiveUrl; +use serde::de::DeserializeOwned; +use std::fs; +use std::marker::PhantomData; +use std::path::PathBuf; +use std::process::{Command, Stdio}; +use std::str::FromStr; +use tempfile::{tempdir, TempDir}; +use types::*; +use validator_manager::{ + create_validators::CreateConfig, + import_validators::ImportConfig, + move_validators::{MoveConfig, PasswordSource, Validators}, +}; + +const EXAMPLE_ETH1_ADDRESS: &str = "0x00000000219ab540356cBB839Cbe05303d7705Fa"; + +const EXAMPLE_PUBKEY_0: &str = "0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95"; +const EXAMPLE_PUBKEY_1: &str = "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c"; + +struct CommandLineTest { + cmd: Command, + config_path: PathBuf, + _dir: TempDir, + _phantom: PhantomData, +} + +impl Default for CommandLineTest { + fn default() -> Self { + let dir = tempdir().unwrap(); + let config_path = dir.path().join("config.json"); + let mut cmd = Command::new(env!("CARGO_BIN_EXE_lighthouse")); + cmd.arg("--dump-config") + .arg(config_path.as_os_str()) + .arg("validator-manager") + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()); + Self { + cmd, + config_path, + _dir: dir, + _phantom: PhantomData, + } + } +} + +impl CommandLineTest { + fn flag(mut self, flag: &str, value: Option<&str>) -> Self { + self.cmd.arg(flag); + if let Some(value) = value { + self.cmd.arg(value); + } + self + } + + fn run(mut cmd: Command, should_succeed: bool) { + let output = cmd.output().expect("process should complete"); + if output.status.success() != should_succeed { + let stdout = String::from_utf8(output.stdout).unwrap(); + let stderr = String::from_utf8(output.stderr).unwrap(); + eprintln!("{}", stdout); + eprintln!("{}", stderr); + panic!( + "Command success was {} when expecting {}", + !should_succeed, should_succeed + ); + } + } +} + +impl CommandLineTest { + fn assert_success(self, func: F) { + Self::run(self.cmd, true); + let contents = fs::read_to_string(self.config_path).unwrap(); + let config: T = serde_json::from_str(&contents).unwrap(); + func(config) + } + + fn assert_failed(self) { + Self::run(self.cmd, false); + } +} + +impl CommandLineTest { + fn validators_create() -> Self { + Self::default().flag("create", None) + } +} + +impl CommandLineTest { + fn validators_import() -> Self { + Self::default().flag("import", None) + } +} + +impl CommandLineTest { + fn validators_move() -> Self { + Self::default().flag("move", None) + } +} + +#[test] +pub fn validator_create_without_output_path() { + CommandLineTest::validators_create().assert_failed(); +} + +#[test] +pub fn validator_create_defaults() { + CommandLineTest::validators_create() + .flag("--output-path", Some("./meow")) + .flag("--count", Some("1")) + .assert_success(|config| { + let expected = CreateConfig { + output_path: PathBuf::from("./meow"), + first_index: 0, + count: 1, + deposit_gwei: MainnetEthSpec::default_spec().max_effective_balance, + mnemonic_path: None, + stdin_inputs: cfg!(windows) || false, + disable_deposits: false, + specify_voting_keystore_password: false, + eth1_withdrawal_address: None, + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + bn_url: None, + force_bls_withdrawal_credentials: 
false, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_create_misc_flags() { + CommandLineTest::validators_create() + .flag("--output-path", Some("./meow")) + .flag("--deposit-gwei", Some("42")) + .flag("--first-index", Some("12")) + .flag("--count", Some("9")) + .flag("--mnemonic-path", Some("./woof")) + .flag("--stdin-inputs", None) + .flag("--specify-voting-keystore-password", None) + .flag("--eth1-withdrawal-address", Some(EXAMPLE_ETH1_ADDRESS)) + .flag("--builder-proposals", Some("true")) + .flag("--suggested-fee-recipient", Some(EXAMPLE_ETH1_ADDRESS)) + .flag("--gas-limit", Some("1337")) + .flag("--beacon-node", Some("http://localhost:1001")) + .flag("--force-bls-withdrawal-credentials", None) + .assert_success(|config| { + let expected = CreateConfig { + output_path: PathBuf::from("./meow"), + first_index: 12, + count: 9, + deposit_gwei: 42, + mnemonic_path: Some(PathBuf::from("./woof")), + stdin_inputs: true, + disable_deposits: false, + specify_voting_keystore_password: true, + eth1_withdrawal_address: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), + builder_proposals: Some(true), + fee_recipient: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), + gas_limit: Some(1337), + bn_url: Some(SensitiveUrl::parse("http://localhost:1001").unwrap()), + force_bls_withdrawal_credentials: true, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_create_disable_deposits() { + CommandLineTest::validators_create() + .flag("--output-path", Some("./meow")) + .flag("--count", Some("1")) + .flag("--disable-deposits", None) + .flag("--builder-proposals", Some("false")) + .assert_success(|config| { + assert_eq!(config.disable_deposits, true); + assert_eq!(config.builder_proposals, Some(false)); + }); +} + +#[test] +pub fn validator_import_defaults() { + CommandLineTest::validators_import() + .flag("--validators-file", Some("./vals.json")) + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = ImportConfig { + validators_file_path: PathBuf::from("./vals.json"), + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + ignore_duplicates: false, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_import_misc_flags() { + CommandLineTest::validators_import() + .flag("--validators-file", Some("./vals.json")) + .flag("--vc-token", Some("./token.json")) + .flag("--ignore-duplicates", None) + .assert_success(|config| { + let expected = ImportConfig { + validators_file_path: PathBuf::from("./vals.json"), + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + ignore_duplicates: true, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_import_missing_token() { + CommandLineTest::validators_import() + .flag("--validators-file", Some("./vals.json")) + .assert_failed(); +} + +#[test] +pub fn validator_import_missing_validators_file() { + CommandLineTest::validators_import() + .flag("--vc-token", Some("./token.json")) + .assert_failed(); +} + +#[test] +pub fn validator_move_defaults() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag("--validators", Some("all")) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: 
SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::All, + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || false, + }, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_move_misc_flags_0() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--builder-proposals", Some("true")) + .flag("--suggested-fee-recipient", Some(EXAMPLE_ETH1_ADDRESS)) + .flag("--gas-limit", Some("1337")) + .flag("--stdin-inputs", None) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::Specific(vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), + ]), + builder_proposals: Some(true), + fee_recipient: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), + gas_limit: Some(1337), + password_source: PasswordSource::Interactive { stdin_inputs: true }, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_move_misc_flags_1() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--builder-proposals", Some("false")) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::Specific(vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap() + ]), + builder_proposals: Some(false), + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || false, + }, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_move_count() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag("--count", Some("42")) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::Count(42), + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || false, + }, + }; + assert_eq!(expected, config); + }); +} diff --git 
a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 494ebcb3dfc..200db731677 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -10,7 +10,6 @@ path = "src/lib.rs" [dev-dependencies] tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } -logging = { path = "../common/logging" } [dependencies] tree_hash = "0.5.0" @@ -63,4 +62,3 @@ malloc_utils = { path = "../common/malloc_utils" } sysinfo = "0.26.5" system_health = { path = "../common/system_health" } logging = { path = "../common/logging" } - diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index a7118aa945c..f0a9258c747 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -2,12 +2,12 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::{ duties_service::{DutiesService, DutyAndProof}, http_metrics::metrics, - validator_store::ValidatorStore, + validator_store::{Error as ValidatorStoreError, ValidatorStore}, OfflineOnFailure, }; use environment::RuntimeContext; use futures::future::join_all; -use slog::{crit, error, info, trace}; +use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; @@ -395,6 +395,20 @@ impl AttestationService { .await { Ok(()) => Some((attestation, duty.validator_index)), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + warn!( + log, + "Missing pubkey for attestation"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "validator" => ?duty.pubkey, + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ); + None + } Err(e) => { crit!( log, @@ -527,10 +541,20 @@ impl AttestationService { .await { Ok(aggregate) => Some(aggregate), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!( + log, + "Missing pubkey for aggregate"; + "pubkey" => ?pubkey, + ); + None + } Err(e) => { crit!( log, - "Failed to sign attestation"; + "Failed to sign aggregate"; "error" => ?e, "pubkey" => ?duty.pubkey, ); diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index d22e6c95f35..2a09455b6ff 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -5,7 +5,10 @@ use crate::{ graffiti_file::GraffitiFile, OfflineOnFailure, }; -use crate::{http_metrics::metrics, validator_store::ValidatorStore}; +use crate::{ + http_metrics::metrics, + validator_store::{Error as ValidatorStoreError, ValidatorStore}, +}; use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use slog::{crit, debug, error, info, trace, warn}; @@ -417,17 +420,31 @@ impl BlockService { BlockError::Recoverable("Unable to determine current slot from clock".to_string()) })?; - let randao_reveal = self + let randao_reveal = match self .validator_store .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) .await - .map_err(|e| { - BlockError::Recoverable(format!( + { + Ok(signature) => signature.into(), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. 
+ warn!( + log, + "Missing pubkey for block randao"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( "Unable to produce randao reveal signature: {:?}", e - )) - })? - .into(); + ))) + } + }; let graffiti = determine_graffiti( &validator_pubkey, @@ -522,11 +539,31 @@ impl BlockService { .await?; let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); - let signed_block = self_ref + let signed_block = match self_ref .validator_store .sign_block::(*validator_pubkey_ref, block, current_slot) .await - .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + { + Ok(block) => block, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. + warn!( + log, + "Missing pubkey for block"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( + "Unable to sign block: {:?}", + e + ))) + } + }; let signing_time_ms = Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 436b8eb4d5c..0789ac78a00 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -204,6 +204,26 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5062).") .takes_value(true), ) + .arg( + Arg::with_name("http-allow-keystore-export") + .long("http-allow-keystore-export") + .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ + API method, which allows exporting keystores and passwords to HTTP API \ + consumers who have access to the API token. 
This method is useful for \ + exporting validators, however it should be used with caution since it \ + exposes private key data to authorized users.") + .required(false) + .takes_value(false), + ) + .arg( + Arg::with_name("http-store-passwords-in-secrets-dir") + .long("http-store-passwords-in-secrets-dir") + .help("If present, any validators created via the HTTP will have keystore \ + passwords stored in the secrets-dir rather than the validator \ + definitions file.") + .required(false) + .takes_value(false), + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::with_name("metrics") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index e0dd12e1005..7c662db9371 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -294,6 +294,14 @@ impl Config { config.http_api.allow_origin = Some(allow_origin.to_string()); } + if cli_args.is_present("http-allow-keystore-export") { + config.http_api.allow_keystore_export = true; + } + + if cli_args.is_present("http-store-passwords-in-secrets-dir") { + config.http_api.store_passwords_in_secrets_dir = true; + } + /* * Prometheus metrics HTTP server */ diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 83cdb936aa3..535f6aeb0a7 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -932,6 +932,20 @@ async fn fill_in_selection_proofs( for result in duty_and_proof_results { let duty_and_proof = match result { Ok(duty_and_proof) => duty_and_proof, + Err(Error::FailedToProduceSelectionProof( + ValidatorStoreError::UnknownPubkey(pubkey), + )) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + warn!( + log, + "Missing pubkey for duty and proof"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + ); + // Do not abort the entire batch for a single failure. + continue; + } Err(e) => { error!( log, diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 7a852091aa3..1e66d947a21 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -2,6 +2,7 @@ use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, + validator_store::Error as ValidatorStoreError, }; use futures::future::join_all; use itertools::Itertools; @@ -539,6 +540,18 @@ pub async fn fill_in_aggregation_proofs( .await { Ok(proof) => proof, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+                    debug!(
+                        log,
+                        "Missing pubkey for sync selection proof";
+                        "pubkey" => ?pubkey,
+                        "validator" => ?duty.pubkey,
+                        "slot" => slot,
+                    );
+                    return None;
+                }
                 Err(e) => {
                     warn!(
                         log,
diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs
index f3107cfedbd..52336afa59d 100644
--- a/validator_client/src/http_api/create_validator.rs
+++ b/validator_client/src/http_api/create_validator.rs
@@ -1,15 +1,16 @@
 use crate::ValidatorStore;
-use account_utils::validator_definitions::ValidatorDefinition;
+use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition};
 use account_utils::{
+    eth2_keystore::Keystore,
     eth2_wallet::{bip39::Mnemonic, WalletBuilder},
     random_mnemonic, random_password, ZeroizeString,
 };
 use eth2::lighthouse_vc::types::{self as api_types};
 use slot_clock::SlotClock;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 use types::ChainSpec;
 use types::EthSpec;
-use validator_dir::Builder as ValidatorDirBuilder;
+use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder};
 
 /// Create some validator EIP-2335 keystores and store them on disk. Then, enroll the validators in
 /// this validator client.
@@ -27,6 +28,7 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
     key_derivation_path_offset: Option<u32>,
     validator_requests: &[api_types::ValidatorRequest],
     validator_dir: P,
+    secrets_dir: Option<PathBuf>,
     validator_store: &ValidatorStore<T, E>,
     spec: &ChainSpec,
 ) -> Result<(Vec<api_types::CreatedValidator>, Mnemonic), warp::Rejection> {
@@ -95,7 +97,11 @@
             ))
         })?;
 
+        let voting_password_storage =
+            get_voting_password_storage(&secrets_dir, &keystores.voting, &voting_password_string)?;
+
         let validator_dir = ValidatorDirBuilder::new(validator_dir.as_ref().into())
+            .password_dir_opt(secrets_dir.clone())
             .voting_keystore(keystores.voting, voting_password.as_bytes())
             .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
             .create_eth1_tx_data(request.deposit_gwei, spec)
@@ -136,7 +142,7 @@
         validator_store
             .add_validator_keystore(
                 voting_keystore_path,
-                voting_password_string,
+                voting_password_storage,
                 request.enable,
                 request.graffiti.clone(),
                 request.suggested_fee_recipient,
@@ -185,3 +191,26 @@ pub async fn create_validators_web3signer(
 
     Ok(())
 }
+
+/// Attempts to return a `PasswordStorage::File` if `secrets_dir` is defined.
+/// Otherwise, returns a `PasswordStorage::ValidatorDefinitions`.
+pub fn get_voting_password_storage( + secrets_dir: &Option, + voting_keystore: &Keystore, + voting_password_string: &ZeroizeString, +) -> Result { + if let Some(secrets_dir) = &secrets_dir { + let password_path = keystore_password_path(secrets_dir, voting_keystore); + if password_path.exists() { + Err(warp_utils::reject::custom_server_error( + "Duplicate keystore password path".to_string(), + )) + } else { + Ok(PasswordStorage::File(password_path)) + } + } else { + Ok(PasswordStorage::ValidatorDefinitions( + voting_password_string.clone(), + )) + } +} diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index b886f604350..c2d9b4d67f4 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -3,11 +3,14 @@ use crate::{ initialized_validators::Error, signing_method::SigningMethod, InitializedValidators, ValidatorStore, }; -use account_utils::ZeroizeString; -use eth2::lighthouse_vc::std_types::{ - DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, ImportKeystoreStatus, - ImportKeystoresRequest, ImportKeystoresResponse, InterchangeJsonStr, KeystoreJsonStr, - ListKeystoresResponse, SingleKeystoreResponse, Status, +use account_utils::{validator_definitions::PasswordStorage, ZeroizeString}; +use eth2::lighthouse_vc::{ + std_types::{ + DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, + ImportKeystoreStatus, ImportKeystoresRequest, ImportKeystoresResponse, InterchangeJsonStr, + KeystoreJsonStr, ListKeystoresResponse, SingleKeystoreResponse, Status, + }, + types::{ExportKeystoresResponse, SingleExportKeystoresResponse}, }; use eth2_keystore::Keystore; use slog::{info, warn, Logger}; @@ -17,7 +20,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use types::{EthSpec, PublicKeyBytes}; -use validator_dir::Builder as ValidatorDirBuilder; +use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; @@ -58,6 +61,7 @@ pub fn list( pub fn import( request: ImportKeystoresRequest, validator_dir: PathBuf, + secrets_dir: Option, validator_store: Arc>, task_executor: TaskExecutor, log: Logger, @@ -128,6 +132,7 @@ pub fn import( keystore, password, validator_dir.clone(), + secrets_dir.clone(), &validator_store, handle, ) { @@ -158,6 +163,7 @@ fn import_single_keystore( keystore: Keystore, password: ZeroizeString, validator_dir_path: PathBuf, + secrets_dir: Option, validator_store: &ValidatorStore, handle: Handle, ) -> Result { @@ -179,6 +185,16 @@ fn import_single_keystore( } } + let password_storage = if let Some(secrets_dir) = &secrets_dir { + let password_path = keystore_password_path(secrets_dir, &keystore); + if password_path.exists() { + return Ok(ImportKeystoreStatus::Duplicate); + } + PasswordStorage::File(password_path) + } else { + PasswordStorage::ValidatorDefinitions(password.clone()) + }; + // Check that the password is correct. // In future we should re-structure to avoid the double decryption here. 
It's not as simple // as removing this check because `add_validator_keystore` will break if provided with an @@ -189,6 +205,7 @@ fn import_single_keystore( .map_err(|e| format!("incorrect password: {:?}", e))?; let validator_dir = ValidatorDirBuilder::new(validator_dir_path) + .password_dir_opt(secrets_dir) .voting_keystore(keystore, password.as_ref()) .store_withdrawal_keystore(false) .build() @@ -201,7 +218,7 @@ fn import_single_keystore( handle .block_on(validator_store.add_validator_keystore( voting_keystore_path, - password, + password_storage, true, None, None, @@ -219,11 +236,28 @@ pub fn delete( task_executor: TaskExecutor, log: Logger, ) -> Result { + let export_response = export(request, validator_store, task_executor, log)?; + Ok(DeleteKeystoresResponse { + data: export_response + .data + .into_iter() + .map(|response| response.status) + .collect(), + slashing_protection: export_response.slashing_protection, + }) +} + +pub fn export( + request: DeleteKeystoresRequest, + validator_store: Arc>, + task_executor: TaskExecutor, + log: Logger, +) -> Result { // Remove from initialized validators. let initialized_validators_rwlock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rwlock.write(); - let mut statuses = request + let mut responses = request .pubkeys .iter() .map(|pubkey_bytes| { @@ -232,7 +266,7 @@ pub fn delete( &mut initialized_validators, task_executor.clone(), ) { - Ok(status) => Status::ok(status), + Ok(status) => status, Err(error) => { warn!( log, @@ -240,7 +274,11 @@ pub fn delete( "pubkey" => ?pubkey_bytes, "error" => ?error, ); - Status::error(DeleteKeystoreStatus::Error, error) + SingleExportKeystoresResponse { + status: Status::error(DeleteKeystoreStatus::Error, error), + validating_keystore: None, + validating_keystore_password: None, + } } } }) @@ -263,19 +301,19 @@ pub fn delete( })?; // Update stasuses based on availability of slashing protection data. 
- for (pubkey, status) in request.pubkeys.iter().zip(statuses.iter_mut()) { - if status.status == DeleteKeystoreStatus::NotFound + for (pubkey, response) in request.pubkeys.iter().zip(responses.iter_mut()) { + if response.status.status == DeleteKeystoreStatus::NotFound && slashing_protection .data .iter() .any(|interchange_data| interchange_data.pubkey == *pubkey) { - status.status = DeleteKeystoreStatus::NotActive; + response.status.status = DeleteKeystoreStatus::NotActive; } } - Ok(DeleteKeystoresResponse { - data: statuses, + Ok(ExportKeystoresResponse { + data: responses, slashing_protection, }) } @@ -284,7 +322,7 @@ fn delete_single_keystore( pubkey_bytes: &PublicKeyBytes, initialized_validators: &mut InitializedValidators, task_executor: TaskExecutor, -) -> Result { +) -> Result { if let Some(handle) = task_executor.handle() { let pubkey = pubkey_bytes .decompress() @@ -292,9 +330,22 @@ fn delete_single_keystore( match handle.block_on(initialized_validators.delete_definition_and_keystore(&pubkey, true)) { - Ok(_) => Ok(DeleteKeystoreStatus::Deleted), + Ok(Some(keystore_and_password)) => Ok(SingleExportKeystoresResponse { + status: Status::ok(DeleteKeystoreStatus::Deleted), + validating_keystore: Some(KeystoreJsonStr(keystore_and_password.keystore)), + validating_keystore_password: keystore_and_password.password, + }), + Ok(None) => Ok(SingleExportKeystoresResponse { + status: Status::ok(DeleteKeystoreStatus::Deleted), + validating_keystore: None, + validating_keystore_password: None, + }), Err(e) => match e { - Error::ValidatorNotInitialized(_) => Ok(DeleteKeystoreStatus::NotFound), + Error::ValidatorNotInitialized(_) => Ok(SingleExportKeystoresResponse { + status: Status::ok(DeleteKeystoreStatus::NotFound), + validating_keystore: None, + validating_keystore_password: None, + }), _ => Err(format!("unable to disable and delete: {:?}", e)), }, } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index f08c8da1bd9..f654833cbb4 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -5,6 +5,8 @@ mod keystores; mod remotekeys; mod tests; +pub mod test_utils; + use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ @@ -12,7 +14,9 @@ use account_utils::{ validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, }; pub use api_secret::ApiSecret; -use create_validator::{create_validators_mnemonic, create_validators_web3signer}; +use create_validator::{ + create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, +}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes}, @@ -71,6 +75,7 @@ pub struct Context { pub api_secret: ApiSecret, pub validator_store: Option>>, pub validator_dir: Option, + pub secrets_dir: Option, pub graffiti_file: Option, pub graffiti_flag: Option, pub spec: ChainSpec, @@ -88,6 +93,8 @@ pub struct Config { pub listen_addr: IpAddr, pub listen_port: u16, pub allow_origin: Option, + pub allow_keystore_export: bool, + pub store_passwords_in_secrets_dir: bool, } impl Default for Config { @@ -97,6 +104,8 @@ impl Default for Config { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 5062, allow_origin: None, + allow_keystore_export: false, + store_passwords_in_secrets_dir: false, } } } @@ -121,6 
+130,8 @@ pub fn serve( shutdown: impl Future + Send + Sync + 'static, ) -> Result<(SocketAddr, impl Future), Error> { let config = &ctx.config; + let allow_keystore_export = config.allow_keystore_export; + let store_passwords_in_secrets_dir = config.store_passwords_in_secrets_dir; let log = ctx.log.clone(); // Configure CORS. @@ -187,6 +198,17 @@ pub fn serve( }) }); + let inner_secrets_dir = ctx.secrets_dir.clone(); + let secrets_dir_filter = warp::any().map(move || inner_secrets_dir.clone()).and_then( + |secrets_dir: Option<_>| async move { + secrets_dir.ok_or_else(|| { + warp_utils::reject::custom_not_found( + "secrets_dir directory is not initialized.".to_string(), + ) + }) + }, + ); + let inner_graffiti_file = ctx.graffiti_file.clone(); let graffiti_file_filter = warp::any().map(move || inner_graffiti_file.clone()); @@ -394,18 +416,21 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_dir_filter.clone()) + .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(spec_filter.clone()) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( - |body: Vec, - validator_dir: PathBuf, - validator_store: Arc>, - spec: Arc, - signer, - task_executor: TaskExecutor| { + move |body: Vec, + validator_dir: PathBuf, + secrets_dir: PathBuf, + validator_store: Arc>, + spec: Arc, + signer, + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); if let Some(handle) = task_executor.handle() { let (validators, mnemonic) = handle.block_on(create_validators_mnemonic( @@ -413,6 +438,7 @@ pub fn serve( None, &body, &validator_dir, + secrets_dir, &validator_store, &spec, ))?; @@ -437,18 +463,21 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_dir_filter.clone()) + .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(spec_filter) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( - |body: api_types::CreateValidatorsMnemonicRequest, - validator_dir: PathBuf, - validator_store: Arc>, - spec: Arc, - signer, - task_executor: TaskExecutor| { + move |body: api_types::CreateValidatorsMnemonicRequest, + validator_dir: PathBuf, + secrets_dir: PathBuf, + validator_store: Arc>, + spec: Arc, + signer, + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); if let Some(handle) = task_executor.handle() { let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| { @@ -463,6 +492,7 @@ pub fn serve( Some(body.key_derivation_path_offset), &body.validators, &validator_dir, + secrets_dir, &validator_store, &spec, ))?; @@ -483,15 +513,17 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_dir_filter.clone()) + .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( - |body: api_types::KeystoreValidatorsPostRequest, - validator_dir: PathBuf, - validator_store: Arc>, - signer, - task_executor: TaskExecutor| { + move |body: api_types::KeystoreValidatorsPostRequest, + validator_dir: PathBuf, + secrets_dir: PathBuf, + validator_store: Arc>, + signer, + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { // Check to ensure the password is correct. 
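                    // (The keypair decryption below doubles as the password check: an
                    // incorrect password fails here, before any validator directory or
                    // password file is written to disk.)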
let keypair = body @@ -504,7 +536,12 @@ pub fn serve( )) })?; + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); + let password_storage = + get_voting_password_storage(&secrets_dir, &body.keystore, &body.password)?; + let validator_dir = ValidatorDirBuilder::new(validator_dir.clone()) + .password_dir_opt(secrets_dir) .voting_keystore(body.keystore.clone(), body.password.as_ref()) .store_withdrawal_keystore(false) .build() @@ -518,7 +555,6 @@ pub fn serve( // Drop validator dir so that `add_validator_keystore` can re-lock the keystore. let voting_keystore_path = validator_dir.voting_keystore_path(); drop(validator_dir); - let voting_password = body.password.clone(); let graffiti = body.graffiti.clone(); let suggested_fee_recipient = body.suggested_fee_recipient; let gas_limit = body.gas_limit; @@ -529,7 +565,7 @@ pub fn serve( handle .block_on(validator_store.add_validator_keystore( voting_keystore_path, - voting_password, + password_storage, body.enable, graffiti, suggested_fee_recipient, @@ -698,6 +734,29 @@ pub fn serve( }) }); + // DELETE /lighthouse/keystores + let delete_lighthouse_keystores = warp::path("lighthouse") + .and(warp::path("keystores")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and(task_executor_filter.clone()) + .and(log_filter.clone()) + .and_then( + move |request, signer, validator_store, task_executor, log| { + blocking_signed_json_task(signer, move || { + if allow_keystore_export { + keystores::export(request, validator_store, task_executor, log) + } else { + Err(warp_utils::reject::custom_bad_request( + "keystore export is disabled".to_string(), + )) + } + }) + }, + ); + // Standard key-manager endpoints. let eth_v1 = warp::path("eth").and(warp::path("v1")); let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); @@ -982,13 +1041,28 @@ pub fn serve( .and(warp::body::json()) .and(signer.clone()) .and(validator_dir_filter) + .and(secrets_dir_filter) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) .and(log_filter.clone()) .and_then( - |request, signer, validator_dir, validator_store, task_executor, log| { + move |request, + signer, + validator_dir, + secrets_dir, + validator_store, + task_executor, + log| { + let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); blocking_signed_json_task(signer, move || { - keystores::import(request, validator_dir, validator_store, task_executor, log) + keystores::import( + request, + validator_dir, + secrets_dir, + validator_store, + task_executor, + log, + ) }) }, ); @@ -1117,7 +1191,8 @@ pub fn serve( )) .or(warp::patch().and(patch_validators)) .or(warp::delete().and( - delete_fee_recipient + delete_lighthouse_keystores + .or(delete_fee_recipient) .or(delete_gas_limit) .or(delete_std_keystores) .or(delete_std_remotekeys), diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs new file mode 100644 index 00000000000..c7558dd586d --- /dev/null +++ b/validator_client/src/http_api/test_utils.rs @@ -0,0 +1,631 @@ +use crate::doppelganger_service::DoppelgangerService; +use crate::key_cache::{KeyCache, CACHE_FILENAME}; +use crate::{ + http_api::{ApiSecret, Config as HttpConfig, Context}, + initialized_validators::{InitializedValidators, OnDecryptFailure}, + Config, ValidatorDefinitions, ValidatorStore, +}; +use account_utils::{ + eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, + ZeroizeString, 
+};
+use deposit_contract::decode_eth1_tx_data;
+use eth2::{
+    lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*},
+    types::ErrorMessage as ApiErrorMessage,
+    Error as ApiError,
+};
+use eth2_keystore::KeystoreBuilder;
+use logging::test_logger;
+use parking_lot::RwLock;
+use sensitive_url::SensitiveUrl;
+use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
+use slot_clock::{SlotClock, TestingSlotClock};
+use std::future::Future;
+use std::marker::PhantomData;
+use std::net::{IpAddr, Ipv4Addr};
+use std::sync::Arc;
+use std::time::Duration;
+use task_executor::test_utils::TestRuntime;
+use tempfile::{tempdir, TempDir};
+use tokio::sync::oneshot;
+
+pub const PASSWORD_BYTES: &[u8] = &[42, 50, 37];
+pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42);
+
+type E = MainnetEthSpec;
+
+pub struct HdValidatorScenario {
+    pub count: usize,
+    pub specify_mnemonic: bool,
+    pub key_derivation_path_offset: u32,
+    pub disabled: Vec<usize>,
+}
+
+pub struct KeystoreValidatorScenario {
+    pub enabled: bool,
+    pub correct_password: bool,
+}
+
+pub struct Web3SignerValidatorScenario {
+    pub count: usize,
+    pub enabled: bool,
+}
+
+pub struct ApiTester {
+    pub client: ValidatorClientHttpClient,
+    pub initialized_validators: Arc<RwLock<InitializedValidators>>,
+    pub validator_store: Arc<ValidatorStore<TestingSlotClock, E>>,
+    pub url: SensitiveUrl,
+    pub api_token: String,
+    pub test_runtime: TestRuntime,
+    pub _server_shutdown: oneshot::Sender<()>,
+    pub validator_dir: TempDir,
+    pub secrets_dir: TempDir,
+}
+
+impl ApiTester {
+    pub async fn new() -> Self {
+        Self::new_with_http_config(Self::default_http_config()).await
+    }
+
+    pub async fn new_with_http_config(http_config: HttpConfig) -> Self {
+        let log = test_logger();
+
+        let validator_dir = tempdir().unwrap();
+        let secrets_dir = tempdir().unwrap();
+
+        let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap();
+
+        let initialized_validators = InitializedValidators::from_definitions(
+            validator_defs,
+            validator_dir.path().into(),
+            log.clone(),
+        )
+        .await
+        .unwrap();
+
+        let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap();
+        let api_pubkey = api_secret.api_token();
+
+        let config = Config {
+            validator_dir: validator_dir.path().into(),
+            secrets_dir: secrets_dir.path().into(),
+            fee_recipient: Some(TEST_DEFAULT_FEE_RECIPIENT),
+            ..Default::default()
+        };
+
+        let spec = E::default_spec();
+
+        let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME);
+        let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap();
+
+        let slot_clock =
+            TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1));
+
+        let test_runtime = TestRuntime::default();
+
+        let validator_store = Arc::new(ValidatorStore::<_, E>::new(
+            initialized_validators,
+            slashing_protection,
+            Hash256::repeat_byte(42),
+            spec,
+            Some(Arc::new(DoppelgangerService::new(log.clone()))),
+            slot_clock.clone(),
+            &config,
+            test_runtime.task_executor.clone(),
+            log.clone(),
+        ));
+
+        validator_store
+            .register_all_in_doppelganger_protection_if_enabled()
+            .expect("Should attach doppelganger service");
+
+        let initialized_validators = validator_store.initialized_validators();
+
+        let context = Arc::new(Context {
+            task_executor: test_runtime.task_executor.clone(),
+            api_secret,
+            validator_dir: Some(validator_dir.path().into()),
+            secrets_dir: Some(secrets_dir.path().into()),
+            validator_store: Some(validator_store.clone()),
+            graffiti_file: None,
+            graffiti_flag: Some(Graffiti::default()),
+            spec: E::default_spec(),
+            config: http_config,
+            log,
+            sse_logging_components: None,
+            slot_clock,
+            _phantom: PhantomData,
+        });
+        let ctx = context;
+        let (shutdown_tx, shutdown_rx) = oneshot::channel();
+        let server_shutdown = async {
+            // It's not really interesting why this triggered, just that it happened.
+            let _ = shutdown_rx.await;
+        };
+        let (listening_socket, server) = super::serve(ctx, server_shutdown).unwrap();
+
+        tokio::spawn(server);
+
+        let url = SensitiveUrl::parse(&format!(
+            "http://{}:{}",
+            listening_socket.ip(),
+            listening_socket.port()
+        ))
+        .unwrap();
+
+        let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey.clone()).unwrap();
+
+        Self {
+            client,
+            initialized_validators,
+            validator_store,
+            url,
+            api_token: api_pubkey,
+            test_runtime,
+            _server_shutdown: shutdown_tx,
+            validator_dir,
+            secrets_dir,
+        }
+    }
+
+    pub fn default_http_config() -> HttpConfig {
+        HttpConfig {
+            enabled: true,
+            listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
+            listen_port: 0,
+            allow_origin: None,
+            allow_keystore_export: true,
+            store_passwords_in_secrets_dir: false,
+        }
+    }
+
+    /// Checks that the key cache exists and can be decrypted with the current
+    /// set of known validators.
+    #[allow(clippy::await_holding_lock)] // This is a test, so it should be fine.
+    pub async fn ensure_key_cache_consistency(&self) {
+        assert!(
+            self.validator_dir.as_ref().join(CACHE_FILENAME).exists(),
+            "the key cache should exist"
+        );
+        let key_cache =
+            KeyCache::open_or_create(self.validator_dir.as_ref()).expect("should open a key cache");
+
+        self.initialized_validators
+            .read()
+            .decrypt_key_cache(key_cache, &mut <_>::default(), OnDecryptFailure::Error)
+            .await
+            .expect("key cache should decrypt");
+    }
+
+    pub fn invalid_token_client(&self) -> ValidatorClientHttpClient {
+        let tmp = tempdir().unwrap();
+        let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap();
+        let invalid_pubkey = api_secret.api_token();
+        ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap()
+    }
+
+    pub async fn test_with_invalid_auth<F, A, T>(self, func: F) -> Self
+    where
+        F: Fn(ValidatorClientHttpClient) -> A,
+        A: Future<Output = Result<T, ApiError>>,
+    {
+        /*
+         * Test with an invalid Authorization header.
+         */
+        match func(self.invalid_token_client()).await {
+            Err(ApiError::ServerMessage(ApiErrorMessage { code: 403, .. })) => (),
+            Err(other) => panic!("expected authorized error, got {:?}", other),
+            Ok(_) => panic!("expected authorized error, got Ok"),
+        }
+
+        /*
+         * Test with a missing Authorization header.
+         */
+        let mut missing_token_client = self.client.clone();
+        missing_token_client.send_authorization_header(false);
+        match func(missing_token_client).await {
+            Err(ApiError::ServerMessage(ApiErrorMessage {
+                code: 401, message, ..
+ })) if message.contains("missing Authorization header") => (), + Err(other) => panic!("expected missing header error, got {:?}", other), + Ok(_) => panic!("expected missing header error, got Ok"), + } + + self + } + + pub fn invalidate_api_token(mut self) -> Self { + self.client = self.invalid_token_client(); + self + } + + pub async fn test_get_lighthouse_version_invalid(self) -> Self { + self.client.get_lighthouse_version().await.unwrap_err(); + self + } + + pub async fn test_get_lighthouse_spec(self) -> Self { + let result = self + .client + .get_lighthouse_spec::() + .await + .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .unwrap(); + let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_lighthouse_version(self) -> Self { + let result = self.client.get_lighthouse_version().await.unwrap().data; + + let expected = VersionData { + version: lighthouse_version::version_with_platform(), + }; + + assert_eq!(result, expected); + + self + } + + #[cfg(target_os = "linux")] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap(); + + self + } + + #[cfg(not(target_os = "linux"))] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap_err(); + + self + } + pub fn vals_total(&self) -> usize { + self.initialized_validators.read().num_total() + } + + pub fn vals_enabled(&self) -> usize { + self.initialized_validators.read().num_enabled() + } + + pub fn assert_enabled_validators_count(self, count: usize) -> Self { + assert_eq!(self.vals_enabled(), count); + self + } + + pub fn assert_validators_count(self, count: usize) -> Self { + assert_eq!(self.vals_total(), count); + self + } + + pub async fn create_hd_validators(self, s: HdValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = self.vals_enabled(); + + let validators = (0..s.count) + .map(|i| ValidatorRequest { + enable: !s.disabled.contains(&i), + description: format!("boi #{}", i), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + deposit_gwei: E::default_spec().max_effective_balance, + }) + .collect::>(); + + let (response, mnemonic) = if s.specify_mnemonic { + let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string()); + let request = CreateValidatorsMnemonicRequest { + mnemonic: mnemonic.clone(), + key_derivation_path_offset: s.key_derivation_path_offset, + validators: validators.clone(), + }; + let response = self + .client + .post_lighthouse_validators_mnemonic(&request) + .await + .unwrap() + .data; + + (response, mnemonic) + } else { + assert_eq!( + s.key_derivation_path_offset, 0, + "cannot use a derivation offset without specifying a mnemonic" + ); + let response = self + .client + .post_lighthouse_validators(validators.clone()) + .await + .unwrap() + .data; + (response.validators.clone(), response.mnemonic) + }; + + assert_eq!(response.len(), s.count); + assert_eq!(self.vals_total(), initial_vals + s.count); + assert_eq!( + self.vals_enabled(), + initial_enabled_vals + s.count - s.disabled.len() + ); + + let server_vals = self.client.get_lighthouse_validators().await.unwrap().data; + + assert_eq!(server_vals.len(), self.vals_total()); + + // Ensure the server lists all of these newly created validators. 
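+        // (Matched by voting pubkey rather than by position, since the server
+        // does not guarantee the ordering of its validator list.)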
+ for validator in &response { + assert!(server_vals + .iter() + .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey)); + } + + /* + * Verify that we can regenerate all the keys from the mnemonic. + */ + + let mnemonic = mnemonic_from_phrase(mnemonic.as_str()).unwrap(); + let mut wallet = WalletBuilder::from_mnemonic(&mnemonic, PASSWORD_BYTES, "".to_string()) + .unwrap() + .build() + .unwrap(); + + wallet + .set_nextaccount(s.key_derivation_path_offset) + .unwrap(); + + for item in response.iter().take(s.count) { + let keypairs = wallet + .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES) + .unwrap(); + let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap(); + + assert_eq!( + item.voting_pubkey, + voting_keypair.pk.clone().into(), + "the locally generated voting pk should match the server response" + ); + + let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); + + let deposit_bytes = serde_utils::hex::decode(&item.eth1_deposit_tx_data).unwrap(); + + let (deposit_data, _) = + decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) + .unwrap(); + + assert_eq!( + deposit_data.pubkey, + voting_keypair.pk.clone().into(), + "the locally generated voting pk should match the deposit data" + ); + + assert_eq!( + deposit_data.withdrawal_credentials, + Hash256::from_slice(&bls::get_withdrawal_credentials( + &withdrawal_keypair.pk, + E::default_spec().bls_withdrawal_prefix_byte + )), + "the locally generated withdrawal creds should match the deposit data" + ); + + assert_eq!( + deposit_data.signature, + deposit_data.create_signature(&voting_keypair.sk, &E::default_spec()), + "the locally-generated deposit sig should create the same deposit sig" + ); + } + + self + } + + pub async fn create_keystore_validators(self, s: KeystoreValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = self.vals_enabled(); + + let password = random_password(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new()) + .unwrap() + .build() + .unwrap(); + + if !s.correct_password { + let request = KeystoreValidatorsPostRequest { + enable: s.enabled, + password: String::from_utf8(random_password().as_ref().to_vec()) + .unwrap() + .into(), + keystore, + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + }; + + self.client + .post_lighthouse_validators_keystore(&request) + .await + .unwrap_err(); + + return self; + } + + let request = KeystoreValidatorsPostRequest { + enable: s.enabled, + password: String::from_utf8(password.as_ref().to_vec()) + .unwrap() + .into(), + keystore, + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + }; + + let response = self + .client + .post_lighthouse_validators_keystore(&request) + .await + .unwrap() + .data; + + let num_enabled = s.enabled as usize; + + assert_eq!(self.vals_total(), initial_vals + 1); + assert_eq!(self.vals_enabled(), initial_enabled_vals + num_enabled); + + let server_vals = self.client.get_lighthouse_validators().await.unwrap().data; + + assert_eq!(server_vals.len(), self.vals_total()); + + assert_eq!(response.voting_pubkey, keypair.pk.into()); + assert_eq!(response.enabled, s.enabled); + + self + } + + pub async fn create_web3signer_validators(self, s: Web3SignerValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = 
self.vals_enabled(); + + let request: Vec<_> = (0..s.count) + .map(|i| { + let kp = Keypair::random(); + Web3SignerValidatorRequest { + enable: s.enabled, + description: format!("{}", i), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + voting_public_key: kp.pk, + url: format!("http://signer_{}.com/", i), + root_certificate_path: None, + request_timeout_ms: None, + client_identity_path: None, + client_identity_password: None, + } + }) + .collect(); + + self.client + .post_lighthouse_validators_web3signer(&request) + .await + .unwrap(); + + assert_eq!(self.vals_total(), initial_vals + s.count); + if s.enabled { + assert_eq!(self.vals_enabled(), initial_enabled_vals + s.count); + } else { + assert_eq!(self.vals_enabled(), initial_enabled_vals); + }; + + self + } + + pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None, None) + .await + .unwrap(); + + assert_eq!( + self.initialized_validators + .read() + .is_enabled(&validator.voting_pubkey.decompress().unwrap()) + .unwrap(), + enabled + ); + + assert!(self + .client + .get_lighthouse_validators() + .await + .unwrap() + .data + .into_iter() + .find(|v| v.voting_pubkey == validator.voting_pubkey) + .map(|v| v.enabled == enabled) + .unwrap()); + + // Check the server via an individual request. + assert_eq!( + self.client + .get_lighthouse_validators_pubkey(&validator.voting_pubkey) + .await + .unwrap() + .unwrap() + .data + .enabled, + enabled + ); + + self + } + + pub async fn set_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + Some(gas_limit), + None, + None, + ) + .await + .unwrap(); + + self + } + + pub async fn assert_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store.get_gas_limit(&validator.voting_pubkey), + gas_limit + ); + + self + } + + pub async fn set_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + Some(builder_proposals), + None, + ) + .await + .unwrap(); + + self + } + + pub async fn assert_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_builder_proposals(&validator.voting_pubkey), + builder_proposals + ); + + self + } +} diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index dbb9d4d620c..3bff444703b 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -31,10 +31,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use task_executor::TaskExecutor; +use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; -use tokio::runtime::Runtime; -use tokio::sync::oneshot; use types::graffiti::GraffitiString; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; @@ 
-48,23 +46,12 @@ struct ApiTester { validator_store: Arc>, url: SensitiveUrl, slot_clock: TestingSlotClock, - _server_shutdown: oneshot::Sender<()>, _validator_dir: TempDir, - _runtime_shutdown: exit_future::Signal, -} - -// Builds a runtime to be used in the testing configuration. -fn build_runtime() -> Arc { - Arc::new( - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .expect("Should be able to build a testing runtime"), - ) + _test_runtime: TestRuntime, } impl ApiTester { - pub async fn new(runtime: std::sync::Weak) -> Self { + pub async fn new() -> Self { let log = test_logger(); let validator_dir = tempdir().unwrap(); @@ -100,9 +87,7 @@ impl ApiTester { Duration::from_secs(1), ); - let (runtime_shutdown, exit) = exit_future::signal(); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = TaskExecutor::new(runtime.clone(), exit, log.clone(), shutdown_tx); + let test_runtime = TestRuntime::default(); let validator_store = Arc::new(ValidatorStore::<_, E>::new( initialized_validators, @@ -112,7 +97,7 @@ impl ApiTester { Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock.clone(), &config, - executor.clone(), + test_runtime.task_executor.clone(), log.clone(), )); @@ -123,9 +108,10 @@ impl ApiTester { let initialized_validators = validator_store.initialized_validators(); let context = Arc::new(Context { - task_executor: executor, + task_executor: test_runtime.task_executor.clone(), api_secret, validator_dir: Some(validator_dir.path().into()), + secrets_dir: Some(secrets_dir.path().into()), validator_store: Some(validator_store.clone()), graffiti_file: None, graffiti_flag: Some(Graffiti::default()), @@ -135,6 +121,8 @@ impl ApiTester { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 0, allow_origin: None, + allow_keystore_export: true, + store_passwords_in_secrets_dir: false, }, sse_logging_components: None, log, @@ -142,12 +130,8 @@ impl ApiTester { _phantom: PhantomData, }); let ctx = context.clone(); - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - let server_shutdown = async { - // It's not really interesting why this triggered, just that it happened. 
- let _ = shutdown_rx.await; - }; - let (listening_socket, server) = super::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = + super::serve(ctx, test_runtime.task_executor.exit()).unwrap(); tokio::spawn(async { server.await }); @@ -166,9 +150,8 @@ impl ApiTester { validator_store, url, slot_clock, - _server_shutdown: shutdown_tx, _validator_dir: validator_dir, - _runtime_shutdown: runtime_shutdown, + _test_runtime: test_runtime, } } @@ -676,387 +659,341 @@ struct Web3SignerValidatorScenario { enabled: bool, } -#[test] -fn invalid_pubkey() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .invalidate_api_token() - .test_get_lighthouse_version_invalid() - .await; - }); +#[tokio::test] +async fn invalid_pubkey() { + ApiTester::new() + .await + .invalidate_api_token() + .test_get_lighthouse_version_invalid() + .await; } -#[test] -fn routes_with_invalid_auth() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .test_with_invalid_auth(|client| async move { client.get_lighthouse_version().await }) - .await - .test_with_invalid_auth(|client| async move { client.get_lighthouse_health().await }) - .await - .test_with_invalid_auth(|client| async move { - client.get_lighthouse_spec::().await - }) - .await - .test_with_invalid_auth( - |client| async move { client.get_lighthouse_validators().await }, - ) - .await - .test_with_invalid_auth(|client| async move { - client - .get_lighthouse_validators_pubkey(&PublicKeyBytes::empty()) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - client - .post_lighthouse_validators(vec![ValidatorRequest { - enable: <_>::default(), - description: <_>::default(), - graffiti: <_>::default(), - suggested_fee_recipient: <_>::default(), - gas_limit: <_>::default(), - builder_proposals: <_>::default(), - deposit_gwei: <_>::default(), - }]) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - client - .post_lighthouse_validators_mnemonic(&CreateValidatorsMnemonicRequest { - mnemonic: String::default().into(), - key_derivation_path_offset: <_>::default(), - validators: <_>::default(), - }) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - let password = random_password(); - let keypair = Keypair::random(); - let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new()) - .unwrap() - .build() - .unwrap(); - client - .post_lighthouse_validators_keystore(&KeystoreValidatorsPostRequest { - password: String::default().into(), - enable: <_>::default(), - keystore, - graffiti: <_>::default(), - suggested_fee_recipient: <_>::default(), - gas_limit: <_>::default(), - builder_proposals: <_>::default(), - }) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - client - .patch_lighthouse_validators( - &PublicKeyBytes::empty(), - Some(false), - None, - None, - None, - ) - .await - }) - .await - .test_with_invalid_auth(|client| async move { client.get_keystores().await }) - .await - .test_with_invalid_auth(|client| async move { - let password = random_password_string(); - let keypair = Keypair::random(); - let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) - .unwrap() - .build() - .map(KeystoreJsonStr) - .unwrap(); - client - .post_keystores(&ImportKeystoresRequest { - keystores: vec![keystore], - passwords: vec![password], - 
slashing_protection: None, - }) - .await - }) - .await - .test_with_invalid_auth(|client| async move { - let keypair = Keypair::random(); - client - .delete_keystores(&DeleteKeystoresRequest { - pubkeys: vec![keypair.pk.compress()], - }) - .await - }) - .await - }); +#[tokio::test] +async fn routes_with_invalid_auth() { + ApiTester::new() + .await + .test_with_invalid_auth(|client| async move { client.get_lighthouse_version().await }) + .await + .test_with_invalid_auth(|client| async move { client.get_lighthouse_health().await }) + .await + .test_with_invalid_auth(|client| async move { + client.get_lighthouse_spec::().await + }) + .await + .test_with_invalid_auth(|client| async move { client.get_lighthouse_validators().await }) + .await + .test_with_invalid_auth(|client| async move { + client + .get_lighthouse_validators_pubkey(&PublicKeyBytes::empty()) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .post_lighthouse_validators(vec![ValidatorRequest { + enable: <_>::default(), + description: <_>::default(), + graffiti: <_>::default(), + suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), + deposit_gwei: <_>::default(), + }]) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .post_lighthouse_validators_mnemonic(&CreateValidatorsMnemonicRequest { + mnemonic: String::default().into(), + key_derivation_path_offset: <_>::default(), + validators: <_>::default(), + }) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + let password = random_password(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new()) + .unwrap() + .build() + .unwrap(); + client + .post_lighthouse_validators_keystore(&KeystoreValidatorsPostRequest { + password: String::default().into(), + enable: <_>::default(), + keystore, + graffiti: <_>::default(), + suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), + }) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .patch_lighthouse_validators( + &PublicKeyBytes::empty(), + Some(false), + None, + None, + None, + ) + .await + }) + .await + .test_with_invalid_auth(|client| async move { client.get_keystores().await }) + .await + .test_with_invalid_auth(|client| async move { + let password = random_password_string(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) + .unwrap() + .build() + .map(KeystoreJsonStr) + .unwrap(); + client + .post_keystores(&ImportKeystoresRequest { + keystores: vec![keystore], + passwords: vec![password], + slashing_protection: None, + }) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + let keypair = Keypair::random(); + client + .delete_keystores(&DeleteKeystoresRequest { + pubkeys: vec![keypair.pk.compress()], + }) + .await + }) + .await; } -#[test] -fn simple_getters() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .test_get_lighthouse_version() - .await - .test_get_lighthouse_health() - .await - .test_get_lighthouse_spec() - .await; - }); +#[tokio::test] +async fn simple_getters() { + ApiTester::new() + .await + .test_get_lighthouse_version() + .await + .test_get_lighthouse_health() + .await + .test_get_lighthouse_spec() + .await; } -#[test] -fn hd_validator_creation() { - 
let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: true, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .create_hd_validators(HdValidatorScenario { - count: 1, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![0], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(3) - .create_hd_validators(HdValidatorScenario { - count: 0, - specify_mnemonic: true, - key_derivation_path_offset: 4, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(3); - }); +#[tokio::test] +async fn hd_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: true, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .create_hd_validators(HdValidatorScenario { + count: 1, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![0], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3) + .create_hd_validators(HdValidatorScenario { + count: 0, + specify_mnemonic: true, + key_derivation_path_offset: 4, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3); } -#[test] -fn validator_exit() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .test_sign_voluntary_exits(0, None) - .await - .test_sign_voluntary_exits(0, Some(Epoch::new(256))) - .await; - }); +#[tokio::test] +async fn validator_exit() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .test_sign_voluntary_exits(0, None) + .await + .test_sign_voluntary_exits(0, Some(Epoch::new(256))) + .await; } -#[test] -fn validator_enabling() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2); - }); +#[tokio::test] +async fn validator_enabling() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + 
.set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2); } -#[test] -fn validator_gas_limit() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_gas_limit(0, 500) - .await - .assert_gas_limit(0, 500) - .await - // Update gas limit while validator is disabled. - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_gas_limit(0, 1000) - .await - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_gas_limit(0, 1000) - .await - }); +#[tokio::test] +async fn validator_gas_limit() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_gas_limit(0, 500) + .await + .assert_gas_limit(0, 500) + .await + // Update gas limit while validator is disabled. + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_gas_limit(0, 1000) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_gas_limit(0, 1000) + .await; } -#[test] -fn validator_builder_proposals() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_builder_proposals(0, true) - .await - // Test setting builder proposals while the validator is disabled - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_builder_proposals(0, false) - .await - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_builder_proposals(0, false) - .await - }); +#[tokio::test] +async fn validator_builder_proposals() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_builder_proposals(0, true) + .await + // Test setting builder proposals while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_builder_proposals(0, false) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_builder_proposals(0, false) + .await; } -#[test] -fn validator_graffiti() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) 
- .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_graffiti(0, "Mr F was here") - .await - .assert_graffiti(0, "Mr F was here") - .await - // Test setting graffiti while the validator is disabled - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_graffiti(0, "Mr F was here again") - .await - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_graffiti(0, "Mr F was here again") - .await - }); +#[tokio::test] +async fn validator_graffiti() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here") + .await + .assert_graffiti(0, "Mr F was here") + .await + // Test setting graffiti while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here again") + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_graffiti(0, "Mr F was here again") + .await; } -#[test] -fn keystore_validator_creation() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: true, - enabled: true, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(1) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: false, - enabled: true, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(1) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: true, - enabled: false, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2); - }); +#[tokio::test] +async fn keystore_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: false, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: false, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2); } -#[test] -fn web3signer_validator_creation() { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - ApiTester::new(weak_runtime) - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_web3signer_validators(Web3SignerValidatorScenario { - count: 1, - enabled: true, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(1); - }); +#[tokio::test] +async fn web3signer_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_web3signer_validators(Web3SignerValidatorScenario { + count: 1, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + 
.assert_validators_count(1); } diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 7120ee5f9fb..d60872e497b 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -12,6 +12,7 @@ use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; +use tokio::runtime::Handle; use types::Address; fn new_keystore(password: ZeroizeString) -> Keystore { @@ -64,31 +65,23 @@ fn remotekey_validator_with_pubkey(pubkey: PublicKey) -> SingleImportRemotekeysR } } -fn run_test(f: F) +async fn run_test(f: F) where F: FnOnce(ApiTester) -> V, V: Future, { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - let tester = ApiTester::new(weak_runtime).await; - f(tester).await - }); + let tester = ApiTester::new().await; + f(tester).await } -fn run_dual_vc_test(f: F) +async fn run_dual_vc_test(f: F) where F: FnOnce(ApiTester, ApiTester) -> V, V: Future, { - let runtime = build_runtime(); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - let tester1 = ApiTester::new(weak_runtime.clone()).await; - let tester2 = ApiTester::new(weak_runtime).await; - f(tester1, tester2).await - }); + let tester1 = ApiTester::new().await; + let tester2 = ApiTester::new().await; + f(tester1, tester2).await } fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes { @@ -199,8 +192,8 @@ fn check_remotekey_delete_response( } } -#[test] -fn get_auth_no_token() { +#[tokio::test] +async fn get_auth_no_token() { run_test(|mut tester| async move { let _ = &tester; tester.client.send_authorization_header(false); @@ -213,19 +206,21 @@ fn get_auth_no_token() { // The token should match the one that the client was originally initialised with. 
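        // (`send_authorization_header(false)` only stops the header from being
        // sent; the token value itself remains cached on the client.)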
assert!(tester.client.api_token() == Some(&token)); }) + .await; } -#[test] -fn get_empty_keystores() { +#[tokio::test] +async fn get_empty_keystores() { run_test(|tester| async move { let _ = &tester; let res = tester.client.get_keystores().await.unwrap(); assert_eq!(res, ListKeystoresResponse { data: vec![] }); }) + .await; } -#[test] -fn import_new_keystores() { +#[tokio::test] +async fn import_new_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -250,10 +245,11 @@ fn import_new_keystores() { let get_res = tester.client.get_keystores().await.unwrap(); check_keystore_get_response(&get_res, &keystores); }) + .await; } -#[test] -fn import_only_duplicate_keystores() { +#[tokio::test] +async fn import_only_duplicate_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -279,10 +275,11 @@ fn import_only_duplicate_keystores() { let get_res = tester.client.get_keystores().await.unwrap(); check_keystore_get_response(&get_res, &keystores); }) + .await; } -#[test] -fn import_some_duplicate_keystores() { +#[tokio::test] +async fn import_some_duplicate_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -330,10 +327,11 @@ fn import_some_duplicate_keystores() { let import_res = tester.client.post_keystores(&req2).await.unwrap(); check_keystore_import_response(&import_res, expected); }) + .await; } -#[test] -fn import_wrong_number_of_passwords() { +#[tokio::test] +async fn import_wrong_number_of_passwords() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -352,10 +350,11 @@ fn import_wrong_number_of_passwords() { .unwrap_err(); assert_eq!(err.status().unwrap(), 400); }) + .await; } -#[test] -fn get_web3_signer_keystores() { +#[tokio::test] +async fn get_web3_signer_keystores() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -412,10 +411,11 @@ fn get_web3_signer_keystores() { assert!(get_res.data.contains(&response), "{:?}", response); } }) + .await; } -#[test] -fn import_and_delete_conflicting_web3_signer_keystores() { +#[tokio::test] +async fn import_and_delete_conflicting_web3_signer_keystores() { run_test(|tester| async move { let _ = &tester; let num_keystores = 3; @@ -477,10 +477,11 @@ fn import_and_delete_conflicting_web3_signer_keystores() { let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); check_keystore_delete_response(&delete_res, all_delete_error(keystores.len())); }) + .await; } -#[test] -fn import_keystores_wrong_password() { +#[tokio::test] +async fn import_keystores_wrong_password() { run_test(|tester| async move { let _ = &tester; let num_keystores = 4; @@ -551,11 +552,12 @@ fn import_keystores_wrong_password() { &import_res, (0..num_keystores).map(|_| ImportKeystoreStatus::Duplicate), ); - }); + }) + .await; } -#[test] -fn import_invalid_slashing_protection() { +#[tokio::test] +async fn import_invalid_slashing_protection() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -589,10 +591,11 @@ fn import_invalid_slashing_protection() { let get_res = tester.client.get_keystores().await.unwrap(); check_keystore_get_response(&get_res, &[]); }) + .await; } -#[test] -fn check_get_set_fee_recipient() { +#[tokio::test] +async fn check_get_set_fee_recipient() { run_test(|tester: ApiTester| async move { let _ = &tester; let password = random_password_string(); @@ -768,10 +771,11 @@ fn 
check_get_set_fee_recipient() { ); } }) + .await; } -#[test] -fn check_get_set_gas_limit() { +#[tokio::test] +async fn check_get_set_gas_limit() { run_test(|tester: ApiTester| async move { let _ = &tester; let password = random_password_string(); @@ -943,14 +947,15 @@ fn check_get_set_gas_limit() { ); } }) + .await } fn all_indices(count: usize) -> Vec { (0..count).collect() } -#[test] -fn migrate_all_with_slashing_protection() { +#[tokio::test] +async fn migrate_all_with_slashing_protection() { let n = 3; generic_migration_test( n, @@ -967,11 +972,12 @@ fn migrate_all_with_slashing_protection() { (1, make_attestation(2, 3), false), (2, make_attestation(1, 2), false), ], - ); + ) + .await; } -#[test] -fn migrate_some_with_slashing_protection() { +#[tokio::test] +async fn migrate_some_with_slashing_protection() { let n = 3; generic_migration_test( n, @@ -989,11 +995,12 @@ fn migrate_some_with_slashing_protection() { (0, make_attestation(2, 3), true), (1, make_attestation(3, 4), true), ], - ); + ) + .await; } -#[test] -fn migrate_some_missing_slashing_protection() { +#[tokio::test] +async fn migrate_some_missing_slashing_protection() { let n = 3; generic_migration_test( n, @@ -1010,11 +1017,12 @@ fn migrate_some_missing_slashing_protection() { (1, make_attestation(2, 3), true), (0, make_attestation(2, 3), true), ], - ); + ) + .await; } -#[test] -fn migrate_some_extra_slashing_protection() { +#[tokio::test] +async fn migrate_some_extra_slashing_protection() { let n = 3; generic_migration_test( n, @@ -1033,7 +1041,8 @@ fn migrate_some_extra_slashing_protection() { (1, make_attestation(3, 4), true), (2, make_attestation(2, 3), false), ], - ); + ) + .await; } /// Run a test that creates some validators on one VC, and then migrates them to a second VC. @@ -1051,7 +1060,7 @@ fn migrate_some_extra_slashing_protection() { /// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`. /// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool /// indicates whether the signing should be successful. 
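///
/// For example, a minimal single-validator migration, in which the second VC
/// must refuse to re-sign an attestation already signed on the first VC, might
/// look like this (hypothetical values, mirroring the `migrate_*` tests above):
///
/// ```ignore
/// generic_migration_test(
///     1,
///     vec![(0, make_attestation(1, 2))],
///     vec![0],
///     vec![0],
///     vec![(0, make_attestation(1, 2), false)],
/// )
/// .await;
/// ```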
-fn generic_migration_test( +async fn generic_migration_test( num_validators: usize, first_vc_attestations: Vec<(usize, Attestation)>, delete_indices: Vec, @@ -1169,11 +1178,12 @@ fn generic_migration_test( Err(e) => assert!(!should_succeed, "{:?}", e), } } - }); + }) + .await } -#[test] -fn delete_keystores_twice() { +#[tokio::test] +async fn delete_keystores_twice() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -1201,10 +1211,11 @@ fn delete_keystores_twice() { let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); check_keystore_delete_response(&delete_res, all_not_active(keystores.len())); }) + .await } -#[test] -fn delete_nonexistent_keystores() { +#[tokio::test] +async fn delete_nonexistent_keystores() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -1219,6 +1230,7 @@ fn delete_nonexistent_keystores() { let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); check_keystore_delete_response(&delete_res, all_not_found(keystores.len())); }) + .await } fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation { @@ -1242,9 +1254,9 @@ fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation { } } -#[test] -fn delete_concurrent_with_signing() { - let runtime = build_runtime(); +#[tokio::test] +async fn delete_concurrent_with_signing() { + let handle = Handle::try_current().unwrap(); let num_keys = 8; let num_signing_threads = 8; let num_attestations = 100; @@ -1257,115 +1269,112 @@ fn delete_concurrent_with_signing() { "num_keys should be divisible by num threads for simplicity" ); - let weak_runtime = Arc::downgrade(&runtime); - runtime.block_on(async { - let tester = ApiTester::new(weak_runtime).await; + let tester = ApiTester::new().await; - // Generate a lot of keys and import them. - let password = random_password_string(); - let keystores = (0..num_keys) - .map(|_| new_keystore(password.clone())) - .collect::>(); - let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::>(); - - let import_res = tester - .client - .post_keystores(&ImportKeystoresRequest { - keystores: keystores.clone(), - passwords: vec![password.clone(); keystores.len()], - slashing_protection: None, - }) - .await - .unwrap(); - check_keystore_import_response(&import_res, all_imported(keystores.len())); + // Generate a lot of keys and import them. + let password = random_password_string(); + let keystores = (0..num_keys) + .map(|_| new_keystore(password.clone())) + .collect::>(); + let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::>(); - // Start several threads signing attestations at sequential epochs. 
- let mut join_handles = vec![]; - - for thread_index in 0..num_signing_threads { - let keys_per_thread = num_keys / num_signing_threads; - let validator_store = tester.validator_store.clone(); - let thread_pubkeys = all_pubkeys - [thread_index * keys_per_thread..(thread_index + 1) * keys_per_thread] - .to_vec(); - - let handle = runtime.spawn(async move { - for j in 0..num_attestations { - let mut att = make_attestation(j, j + 1); - for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() { - let _ = validator_store - .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) - .await; - } + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + check_keystore_import_response(&import_res, all_imported(keystores.len())); + + // Start several threads signing attestations at sequential epochs. + let mut join_handles = vec![]; + + for thread_index in 0..num_signing_threads { + let keys_per_thread = num_keys / num_signing_threads; + let validator_store = tester.validator_store.clone(); + let thread_pubkeys = all_pubkeys + [thread_index * keys_per_thread..(thread_index + 1) * keys_per_thread] + .to_vec(); + + let handle = handle.spawn(async move { + for j in 0..num_attestations { + let mut att = make_attestation(j, j + 1); + for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() { + let _ = validator_store + .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) + .await; } - }); - join_handles.push(handle); - } + } + }); + join_handles.push(handle); + } + + // Concurrently, delete each validator one at a time. Store the slashing protection + // data so we can ensure it doesn't change after a key is exported. + let mut delete_handles = vec![]; + for _ in 0..num_delete_threads { + let client = tester.client.clone(); + let all_pubkeys = all_pubkeys.clone(); + + let handle = handle.spawn(async move { + let mut rng = SmallRng::from_entropy(); - // Concurrently, delete each validator one at a time. Store the slashing protection - // data so we can ensure it doesn't change after a key is exported. 
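+        // (Each delete goes through the standard keystore DELETE endpoint; its
+        // response includes the interchange data for the deleted keys, which is
+        // what the `slashing_protection` vector below accumulates.)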
- let mut delete_handles = vec![]; - for _ in 0..num_delete_threads { - let client = tester.client.clone(); - let all_pubkeys = all_pubkeys.clone(); - - let handle = runtime.spawn(async move { - let mut rng = SmallRng::from_entropy(); - - let mut slashing_protection = vec![]; - for _ in 0..num_delete_attempts { - let to_delete = all_pubkeys - .iter() - .filter(|_| rng.gen_bool(delete_prob)) - .copied() - .collect::>(); - - if !to_delete.is_empty() { - let delete_res = client - .delete_keystores(&DeleteKeystoresRequest { pubkeys: to_delete }) - .await - .unwrap(); - - for status in delete_res.data.iter() { - assert_ne!(status.status, DeleteKeystoreStatus::Error); - } - - slashing_protection.push(delete_res.slashing_protection); + let mut slashing_protection = vec![]; + for _ in 0..num_delete_attempts { + let to_delete = all_pubkeys + .iter() + .filter(|_| rng.gen_bool(delete_prob)) + .copied() + .collect::>(); + + if !to_delete.is_empty() { + let delete_res = client + .delete_keystores(&DeleteKeystoresRequest { pubkeys: to_delete }) + .await + .unwrap(); + + for status in delete_res.data.iter() { + assert_ne!(status.status, DeleteKeystoreStatus::Error); } + + slashing_protection.push(delete_res.slashing_protection); } - slashing_protection - }); + } + slashing_protection + }); - delete_handles.push(handle); - } + delete_handles.push(handle); + } - // Collect slashing protection. - let mut slashing_protection_map = HashMap::new(); - let collected_slashing_protection = futures::future::join_all(delete_handles).await; - - for interchange in collected_slashing_protection - .into_iter() - .flat_map(Result::unwrap) - { - for validator_data in interchange.data { - slashing_protection_map - .entry(validator_data.pubkey) - .and_modify(|existing| { - assert_eq!( - *existing, validator_data, - "slashing protection data changed after first export" - ) - }) - .or_insert(validator_data); - } + // Collect slashing protection. 
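+        // (Merge the interchange data returned by every delete so we can assert,
+        // per pubkey, that it never changes once a key has first been exported.)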
+ let mut slashing_protection_map = HashMap::new(); + let collected_slashing_protection = futures::future::join_all(delete_handles).await; + + for interchange in collected_slashing_protection + .into_iter() + .flat_map(Result::unwrap) + { + for validator_data in interchange.data { + slashing_protection_map + .entry(validator_data.pubkey) + .and_modify(|existing| { + assert_eq!( + *existing, validator_data, + "slashing protection data changed after first export" + ) + }) + .or_insert(validator_data); } + } - futures::future::join_all(join_handles).await - }); + futures::future::join_all(join_handles).await; } -#[test] -fn delete_then_reimport() { +#[tokio::test] +async fn delete_then_reimport() { run_test(|tester| async move { let _ = &tester; let password = random_password_string(); @@ -1396,19 +1405,21 @@ fn delete_then_reimport() { let import_res = tester.client.post_keystores(&import_req).await.unwrap(); check_keystore_import_response(&import_res, all_imported(keystores.len())); }) + .await } -#[test] -fn get_empty_remotekeys() { +#[tokio::test] +async fn get_empty_remotekeys() { run_test(|tester| async move { let _ = &tester; let res = tester.client.get_remotekeys().await.unwrap(); assert_eq!(res, ListRemotekeysResponse { data: vec![] }); }) + .await } -#[test] -fn import_new_remotekeys() { +#[tokio::test] +async fn import_new_remotekeys() { run_test(|tester| async move { let _ = &tester; @@ -1443,10 +1454,11 @@ fn import_new_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_same_remotekey_different_url() { +#[tokio::test] +async fn import_same_remotekey_different_url() { run_test(|tester| async move { let _ = &tester; @@ -1485,10 +1497,11 @@ fn import_same_remotekey_different_url() { }], ); }) + .await } -#[test] -fn delete_remotekey_then_reimport_different_url() { +#[tokio::test] +async fn delete_remotekey_then_reimport_different_url() { run_test(|tester| async move { let _ = &tester; @@ -1534,10 +1547,11 @@ fn delete_remotekey_then_reimport_different_url() { vec![ImportRemotekeyStatus::Imported].into_iter(), ); }) + .await } -#[test] -fn import_only_duplicate_remotekeys() { +#[tokio::test] +async fn import_only_duplicate_remotekeys() { run_test(|tester| async move { let _ = &tester; let remotekeys = (0..3) @@ -1582,10 +1596,11 @@ fn import_only_duplicate_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_some_duplicate_remotekeys() { +#[tokio::test] +async fn import_some_duplicate_remotekeys() { run_test(|tester| async move { let _ = &tester; let num_remotekeys = 5; @@ -1649,10 +1664,11 @@ fn import_some_duplicate_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remote_and_local_keys() { +#[tokio::test] +async fn import_remote_and_local_keys() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -1714,10 +1730,11 @@ fn import_remote_and_local_keys() { assert!(get_res.data.contains(&response), "{:?}", response); } }) + .await } -#[test] -fn import_same_local_and_remote_keys() { +#[tokio::test] +async fn import_same_local_and_remote_keys() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -1782,9 +1799,10 @@ fn import_same_local_and_remote_keys() { assert!(get_res.data.contains(&response), 
"{:?}", response); } }) + .await } -#[test] -fn import_same_remote_and_local_keys() { +#[tokio::test] +async fn import_same_remote_and_local_keys() { run_test(|tester| async move { let _ = &tester; let num_local = 3; @@ -1847,10 +1865,11 @@ fn import_same_remote_and_local_keys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn delete_remotekeys_twice() { +#[tokio::test] +async fn delete_remotekeys_twice() { run_test(|tester| async move { let _ = &tester; @@ -1893,10 +1912,11 @@ fn delete_remotekeys_twice() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, Vec::new()); }) + .await } -#[test] -fn delete_nonexistent_remotekey() { +#[tokio::test] +async fn delete_nonexistent_remotekey() { run_test(|tester| async move { let _ = &tester; @@ -1919,10 +1939,11 @@ fn delete_nonexistent_remotekey() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, Vec::new()); }) + .await } -#[test] -fn delete_then_reimport_remotekeys() { +#[tokio::test] +async fn delete_then_reimport_remotekeys() { run_test(|tester| async move { let _ = &tester; @@ -1984,10 +2005,11 @@ fn delete_then_reimport_remotekeys() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remotekey_web3signer() { +#[tokio::test] +async fn import_remotekey_web3signer() { run_test(|tester| async move { let _ = &tester; @@ -2043,10 +2065,11 @@ fn import_remotekey_web3signer() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remotekey_web3signer_disabled() { +#[tokio::test] +async fn import_remotekey_web3signer_disabled() { run_test(|tester| async move { let _ = &tester; @@ -2096,10 +2119,11 @@ fn import_remotekey_web3signer_disabled() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } -#[test] -fn import_remotekey_web3signer_enabled() { +#[tokio::test] +async fn import_remotekey_web3signer_enabled() { run_test(|tester| async move { let _ = &tester; @@ -2156,4 +2180,5 @@ fn import_remotekey_web3signer_enabled() { let get_res = tester.client.get_remotekeys().await.unwrap(); check_remotekey_get_response(&get_res, expected_responses); }) + .await } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 090acbe969b..f15ea27c9b2 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -8,7 +8,7 @@ use crate::signing_method::SigningMethod; use account_utils::{ - read_password, read_password_from_user, + read_password, read_password_from_user, read_password_string, validator_definitions::{ self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, CONFIG_FILENAME, @@ -44,6 +44,19 @@ const DEFAULT_REMOTE_SIGNER_REQUEST_TIMEOUT: Duration = Duration::from_secs(12); // Use TTY instead of stdin to capture passwords from users. const USE_STDIN: bool = false; +pub enum OnDecryptFailure { + /// If the key cache fails to decrypt, create a new cache. + CreateNew, + /// Return an error if the key cache fails to decrypt. This should only be + /// used in testing. 
+ Error, +} + +pub struct KeystoreAndPassword { + pub keystore: Keystore, + pub password: Option, +} + #[derive(Debug)] pub enum Error { /// Refused to open a validator with an existing lockfile since that validator may be in-use by @@ -98,6 +111,11 @@ pub enum Error { UnableToBuildWeb3SignerClient(ReqwestError), /// Unable to apply an action to a validator. InvalidActionOnValidator, + UnableToReadValidatorPassword(String), + UnableToReadKeystoreFile(eth2_keystore::Error), + UnableToSaveKeyCache(key_cache::Error), + UnableToDecryptKeyCache(key_cache::Error), + UnableToDeletePasswordFile(PathBuf, io::Error), } impl From for Error { @@ -539,33 +557,78 @@ impl InitializedValidators { &mut self, pubkey: &PublicKey, is_local_keystore: bool, - ) -> Result<(), Error> { + ) -> Result, Error> { // 1. Disable the validator definition. // // We disable before removing so that in case of a crash the auto-discovery mechanism // won't re-activate the keystore. - if let Some(def) = self + let mut uuid_opt = None; + let mut password_path_opt = None; + let keystore_and_password = if let Some(def) = self .definitions .as_mut_slice() .iter_mut() .find(|def| &def.voting_public_key == pubkey) { - // Update definition for local keystore - if def.signing_definition.is_local_keystore() && is_local_keystore { - def.enabled = false; - self.definitions - .save(&self.validators_dir) - .map_err(Error::UnableToSaveDefinitions)?; - } else if !def.signing_definition.is_local_keystore() && !is_local_keystore { - def.enabled = false; - } else { - return Err(Error::InvalidActionOnValidator); + match &def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_path, + voting_keystore_password, + voting_keystore_password_path, + .. + } if is_local_keystore => { + let password = match (voting_keystore_password, voting_keystore_password_path) { + (Some(password), _) => Some(password.clone()), + (_, Some(path)) => { + password_path_opt = Some(path.clone()); + read_password_string(path) + .map(Option::Some) + .map_err(Error::UnableToReadValidatorPassword)? + } + (None, None) => None, + }; + let keystore = Keystore::from_json_file(voting_keystore_path) + .map_err(Error::UnableToReadKeystoreFile)?; + uuid_opt = Some(*keystore.uuid()); + + def.enabled = false; + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Some(KeystoreAndPassword { keystore, password }) + } + SigningDefinition::Web3Signer(_) if !is_local_keystore => { + def.enabled = false; + None + } + _ => return Err(Error::InvalidActionOnValidator), } } else { return Err(Error::ValidatorNotInitialized(pubkey.clone())); + }; + + // 2. Remove the validator from the key cache. This ensures the key + // cache is consistent next time the VC starts. + // + // It's not a big deal if this succeeds and something fails later in + // this function because the VC will self-heal from a corrupt key cache. + // + // Do this before modifying `self.validators` or deleting anything from + // the filesystem. + if let Some(uuid) = uuid_opt { + let key_cache = KeyCache::open_or_create(&self.validators_dir) + .map_err(Error::UnableToOpenKeyCache)?; + let mut decrypted_key_cache = self + .decrypt_key_cache(key_cache, &mut <_>::default(), OnDecryptFailure::CreateNew) + .await?; + decrypted_key_cache.remove(&uuid); + decrypted_key_cache + .save(&self.validators_dir) + .map_err(Error::UnableToSaveKeyCache)?; } - // 2. Delete from `self.validators`, which holds the signing method. + // 3. 
Delete from `self.validators`, which holds the signing method. // Delete the keystore files. if let Some(initialized_validator) = self.validators.remove(&pubkey.compress()) { if let SigningMethod::LocalKeystore { @@ -583,14 +646,28 @@ impl InitializedValidators { } } - // 3. Delete from validator definitions entirely. + // 4. Delete from validator definitions entirely. self.definitions .retain(|def| &def.voting_public_key != pubkey); self.definitions .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; - Ok(()) + // 5. Delete the keystore password if it's not being used by any definition. + if let Some(password_path) = password_path_opt.and_then(|p| p.canonicalize().ok()) { + if self + .definitions + .iter_voting_keystore_password_paths() + // Require canonicalized paths so we can do a true equality check. + .filter_map(|existing| existing.canonicalize().ok()) + .all(|existing| existing != password_path) + { + fs::remove_file(&password_path) + .map_err(|e| Error::UnableToDeletePasswordFile(password_path, e))?; + } + } + + Ok(keystore_and_password) } /// Attempt to delete the voting keystore file, or its entire validator directory. @@ -900,10 +977,11 @@ impl InitializedValidators { /// filesystem accesses for keystores that are already known. In the case that a keystore /// from the validator definitions is not yet in this map, it will be loaded from disk and /// inserted into the map. - async fn decrypt_key_cache( + pub async fn decrypt_key_cache( &self, mut cache: KeyCache, key_stores: &mut HashMap, + on_failure: OnDecryptFailure, ) -> Result { // Read relevant key stores from the filesystem. let mut definitions_map = HashMap::new(); @@ -971,11 +1049,13 @@ impl InitializedValidators { //decrypt tokio::task::spawn_blocking(move || match cache.decrypt(passwords, public_keys) { - Ok(_) | Err(key_cache::Error::AlreadyDecrypted) => cache, - _ => KeyCache::new(), + Ok(_) | Err(key_cache::Error::AlreadyDecrypted) => Ok(cache), + _ if matches!(on_failure, OnDecryptFailure::CreateNew) => Ok(KeyCache::new()), + Err(e) => Err(e), }) .await - .map_err(Error::TokioJoin) + .map_err(Error::TokioJoin)? + .map_err(Error::UnableToDecryptKeyCache) } /// Scans `self.definitions` and attempts to initialize and validators which are not already @@ -1013,7 +1093,8 @@ impl InitializedValidators { // Only decrypt cache when there is at least one local definition. // Decrypting cache is a very expensive operation which is never used for web3signer. let mut key_cache = if has_local_definitions { - self.decrypt_key_cache(cache, &mut key_stores).await? + self.decrypt_key_cache(cache, &mut key_stores, OnDecryptFailure::CreateNew) + .await? } else { // Assign an empty KeyCache if all definitions are of the Web3Signer type. KeyCache::new() @@ -1191,4 +1272,41 @@ impl InitializedValidators { val.index = Some(index); } } + + /// Deletes any passwords stored in the validator definitions file and + /// returns a map of pubkey to deleted password. + /// + /// This should only be used for testing, it's rather destructive. + pub fn delete_passwords_from_validator_definitions( + &mut self, + ) -> Result, Error> { + let mut passwords = HashMap::default(); + + for def in self.definitions.as_mut_slice() { + match &mut def.signing_definition { + SigningDefinition::LocalKeystore { + ref mut voting_keystore_password, + .. + } => { + if let Some(password) = voting_keystore_password.take() { + passwords.insert(def.voting_public_key.clone(), password); + } + } + // Remote signers don't have passwords. 
+ SigningDefinition::Web3Signer { .. } => (), + }; + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(passwords) + } + + /// Prefer other methods in production. Arbitrarily modifying a validator + /// definition manually may result in inconsistencies. + pub fn as_mut_slice_testing_only(&mut self) -> &mut [ValidatorDefinition] { + self.definitions.as_mut_slice() + } } diff --git a/validator_client/src/key_cache.rs b/validator_client/src/key_cache.rs index b7abaaed069..c2dd7aa8fed 100644 --- a/validator_client/src/key_cache.rs +++ b/validator_client/src/key_cache.rs @@ -47,6 +47,12 @@ pub struct KeyCache { type SerializedKeyMap = HashMap; +impl Default for KeyCache { + fn default() -> Self { + Self::new() + } +} + impl KeyCache { pub fn new() -> Self { KeyCache { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 60943a260c1..f7a80f0a8e7 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -172,9 +172,12 @@ impl ProductionValidatorClient { let new_validators = validator_defs .discover_local_keystores(&config.validator_dir, &config.secrets_dir, &log) .map_err(|e| format!("Unable to discover local validator keystores: {:?}", e))?; - validator_defs - .save(&config.validator_dir) - .map_err(|e| format!("Unable to update validator definitions: {:?}", e))?; + validator_defs.save(&config.validator_dir).map_err(|e| { + format!( + "Provide --suggested-fee-recipient or update validator definitions: {:?}", + e + ) + })?; info!( log, "Completed validator discovery"; @@ -573,6 +576,7 @@ impl ProductionValidatorClient { api_secret, validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), + secrets_dir: Some(self.config.secrets_dir.clone()), graffiti_file: self.config.graffiti_file.clone(), graffiti_flag: self.config.graffiti, spec: self.context.eth2_config.spec.clone(), diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 732ae68ff86..909e64a78a6 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -94,8 +94,7 @@ async fn notify( info!( log, "No validators present"; - "msg" => "see `lighthouse account validator create --help` \ - or the HTTP API documentation" + "msg" => "see `lighthouse vm create --help` or the HTTP API documentation" ) } else if total_validators == attesting_validators { info!( diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 7d6e1744c83..2d2221680f9 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,5 +1,5 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::validator_store::{DoppelgangerStatus, ValidatorStore}; +use crate::validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; use crate::OfflineOnFailure; use bls::PublicKeyBytes; use environment::RuntimeContext; @@ -442,8 +442,23 @@ impl PreparationService { .await { Ok(data) => data, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+ debug!( + log, + "Missing pubkey for registration data"; + "pubkey" => ?pubkey, + ); + continue; + } Err(e) => { - error!(log, "Unable to sign validator registration data"; "error" => ?e, "pubkey" => ?pubkey); + error!( + log, + "Unable to sign validator registration data"; + "error" => ?e, + "pubkey" => ?pubkey + ); continue; } }; diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index cc20cedfc6c..e01bf09cf2f 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -1,5 +1,9 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::{duties_service::DutiesService, validator_store::ValidatorStore, OfflineOnFailure}; +use crate::{ + duties_service::DutiesService, + validator_store::{Error as ValidatorStoreError, ValidatorStore}, + OfflineOnFailure, +}; use environment::RuntimeContext; use eth2::types::BlockId; use futures::future::join_all; @@ -264,6 +268,18 @@ impl SyncCommitteeService { .await { Ok(signature) => Some(signature), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!( + log, + "Missing pubkey for sync committee signature"; + "pubkey" => ?pubkey, + "validator_index" => duty.validator_index, + "slot" => slot, + ); + None + } Err(e) => { crit!( log, @@ -405,6 +421,17 @@ impl SyncCommitteeService { .await { Ok(signed_contribution) => Some(signed_contribution), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!( + log, + "Missing pubkey for sync contribution"; + "pubkey" => ?pubkey, + "slot" => slot, + ); + None + } Err(e) => { crit!( log, diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 73843579a2b..365f7f73474 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -5,7 +5,7 @@ use crate::{ signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}, Config, }; -use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString}; +use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; use parking_lot::{Mutex, RwLock}; use slashing_protection::{ interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, @@ -170,7 +170,7 @@ impl ValidatorStore { pub async fn add_validator_keystore>( &self, voting_keystore_path: P, - password: ZeroizeString, + password_storage: PasswordStorage, enable: bool, graffiti: Option, suggested_fee_recipient: Option
,
@@ -179,7 +179,7 @@ ) -> Result<ValidatorDefinition, String> {
 let mut validator_def = ValidatorDefinition::new_keystore_with_password(
 voting_keystore_path,
- Some(password),
+ password_storage,
 graffiti.map(Into::into),
 suggested_fee_recipient,
 gas_limit,
diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml
new file mode 100644
index 00000000000..851510820e8
--- /dev/null
+++ b/validator_manager/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "validator_manager"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+bls = { path = "../crypto/bls" }
+clap = "2.33.3"
+types = { path = "../consensus/types" }
+environment = { path = "../lighthouse/environment" }
+eth2_network_config = { path = "../common/eth2_network_config" }
+clap_utils = { path = "../common/clap_utils" }
+eth2_wallet = { path = "../crypto/eth2_wallet" }
+eth2_keystore = { path = "../crypto/eth2_keystore" }
+account_utils = { path = "../common/account_utils" }
+serde = { version = "1.0.116", features = ["derive"] }
+serde_json = "1.0.58"
+ethereum_serde_utils = "0.5.0"
+tree_hash = "0.5.0"
+eth2 = { path = "../common/eth2", features = ["lighthouse"]}
+hex = "0.4.2"
+tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] }
+
+[dev-dependencies]
+tempfile = "3.1.0"
+regex = "1.6.0"
+eth2_network_config = { path = "../common/eth2_network_config" }
+validator_client = { path = "../validator_client" }
diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs
new file mode 100644
index 00000000000..6a3f93a3f78
--- /dev/null
+++ b/validator_manager/src/common.rs
@@ -0,0 +1,361 @@
+use account_utils::{strip_off_newlines, ZeroizeString};
+use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr};
+use eth2::{
+ lighthouse_vc::{
+ http_client::ValidatorClientHttpClient,
+ std_types::{ImportKeystoreStatus, ImportKeystoresRequest, SingleKeystoreResponse, Status},
+ types::UpdateFeeRecipientRequest,
+ },
+ SensitiveUrl,
+};
+use serde::{Deserialize, Serialize};
+use std::fs;
+use std::path::{Path, PathBuf};
+use tree_hash::TreeHash;
+use types::*;
+
+pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates";
+pub const STDIN_INPUTS_FLAG: &str = "stdin-inputs";
+pub const COUNT_FLAG: &str = "count";
+
+/// When the `ethereum/staking-deposit-cli` tool generates deposit data JSON, it adds a
+/// `deposit_cli_version` to protect the web-based "Launchpad" tool against a breaking change that
+/// was introduced in `ethereum/staking-deposit-cli`. Lighthouse doesn't really have a version that
+/// it can use here, so we choose a static string that is:
+///
+/// 1. High enough that it's accepted by Launchpad.
+/// 2. Weird enough to identify Lighthouse.
+const LIGHTHOUSE_DEPOSIT_CLI_VERSION: &str = "20.18.20";
+
+#[derive(Debug)]
+pub enum UploadError {
+ InvalidPublicKey,
+ DuplicateValidator(PublicKeyBytes),
+ FailedToListKeys(eth2::Error),
+ KeyUploadFailed(eth2::Error),
+ IncorrectStatusCount(usize),
+ FeeRecipientUpdateFailed(eth2::Error),
+ PatchValidatorFailed(eth2::Error),
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct ValidatorSpecification {
+ pub voting_keystore: KeystoreJsonStr,
+ pub voting_keystore_password: ZeroizeString,
+ pub slashing_protection: Option<InterchangeJsonStr>,
+ pub fee_recipient: Option<Address>,
+ pub gas_limit: Option<u64>,
+ pub builder_proposals: Option<bool>,
+ pub enabled: Option<bool>,
+}
+
+impl ValidatorSpecification {
+ /// Upload the validator to a validator client via HTTP.
+ pub async fn upload(
+ self,
+ http_client: &ValidatorClientHttpClient,
+ ignore_duplicates: bool,
+ ) -> Result<Status<ImportKeystoreStatus>, UploadError> {
+ let ValidatorSpecification {
+ voting_keystore,
+ voting_keystore_password,
+ slashing_protection,
+ fee_recipient,
+ gas_limit,
+ builder_proposals,
+ enabled,
+ } = self;
+
+ let voting_public_key = voting_keystore
+ .public_key()
+ .ok_or(UploadError::InvalidPublicKey)?
+ .into();
+
+ let request = ImportKeystoresRequest {
+ keystores: vec![voting_keystore],
+ passwords: vec![voting_keystore_password],
+ slashing_protection,
+ };
+
+ // Check to see if this validator already exists on the remote validator client.
+ match http_client.get_keystores().await {
+ Ok(response) => {
+ if response
+ .data
+ .iter()
+ .any(|validator| validator.validating_pubkey == voting_public_key)
+ {
+ if ignore_duplicates {
+ eprintln!(
+ "Duplicate validators are ignored, ignoring {:?} which exists \
+ on the destination validator client",
+ voting_public_key
+ );
+ } else {
+ return Err(UploadError::DuplicateValidator(voting_public_key));
+ }
+ }
+ }
+ Err(e) => {
+ return Err(UploadError::FailedToListKeys(e));
+ }
+ };
+
+ let mut statuses = http_client
+ .post_keystores(&request)
+ .await
+ .map_err(UploadError::KeyUploadFailed)?
+ .data;
+
+ let status = statuses.pop().ok_or(UploadError::IncorrectStatusCount(0))?;
+ if !statuses.is_empty() {
+ return Err(UploadError::IncorrectStatusCount(statuses.len() + 1));
+ }
+
+ // Exit early if there's an error uploading.
+ if status.status == ImportKeystoreStatus::Error {
+ return Ok(status);
+ }
+
+ if let Some(fee_recipient) = fee_recipient {
+ http_client
+ .post_fee_recipient(
+ &voting_public_key,
+ &UpdateFeeRecipientRequest {
+ ethaddress: fee_recipient,
+ },
+ )
+ .await
+ .map_err(UploadError::FeeRecipientUpdateFailed)?;
+ }
+
+ if gas_limit.is_some() || builder_proposals.is_some() || enabled.is_some() {
+ http_client
+ .patch_lighthouse_validators(
+ &voting_public_key,
+ enabled,
+ gas_limit,
+ builder_proposals,
+ None, // Graffiti field is not maintained between validator moves.
+ )
+ .await
+ .map_err(UploadError::PatchValidatorFailed)?;
+ }
+
+ Ok(status)
+ }
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct CreateSpec {
+ pub mnemonic: String,
+ pub validator_client_url: Option<SensitiveUrl>,
+ pub validator_client_token_path: Option<PathBuf>,
+ pub json_deposit_data_path: Option<PathBuf>,
+ pub ignore_duplicates: bool,
+ pub validators: Vec<ValidatorSpecification>,
+}
+
+/// The structure generated by the `staking-deposit-cli` which has become a quasi-standard for
+/// browser-based deposit submission tools (e.g., the Ethereum Launchpad and Lido).
+///
+/// We take this code as the canonical definition:
+///
+/// https://github.com/ethereum/staking-deposit-cli/blob/76ed78224fdfe3daca788d12442b3d1a37978296/staking_deposit/credentials.py#L131-L144
+#[derive(Debug, PartialEq, Serialize, Deserialize)]
+pub struct StandardDepositDataJson {
+ #[serde(with = "public_key_bytes_without_0x_prefix")]
+ pub pubkey: PublicKeyBytes,
+ #[serde(with = "hash256_without_0x_prefix")]
+ pub withdrawal_credentials: Hash256,
+ /// The `amount` field is *not* quoted (i.e., a string) like most other `u64` fields in the
+ /// consensus specs; it's a simple integer.
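+ ///
+ /// For example (illustrative value for a 32 ETH deposit), the JSON carries
+ /// `"amount": 32000000000` rather than the quoted `"amount": "32000000000"` form
+ /// used by most consensus-spec JSON.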
+ pub amount: u64, + #[serde(with = "signature_bytes_without_0x_prefix")] + pub signature: SignatureBytes, + #[serde(with = "bytes_4_without_0x_prefix")] + pub fork_version: [u8; 4], + pub network_name: String, + #[serde(with = "hash256_without_0x_prefix")] + pub deposit_message_root: Hash256, + #[serde(with = "hash256_without_0x_prefix")] + pub deposit_data_root: Hash256, + pub deposit_cli_version: String, +} + +impl StandardDepositDataJson { + pub fn new( + keypair: &Keypair, + withdrawal_credentials: Hash256, + amount: u64, + spec: &ChainSpec, + ) -> Result { + let deposit_data = { + let mut deposit_data = DepositData { + pubkey: keypair.pk.clone().into(), + withdrawal_credentials, + amount, + signature: SignatureBytes::empty(), + }; + deposit_data.signature = deposit_data.create_signature(&keypair.sk, spec); + deposit_data + }; + + let deposit_message_root = deposit_data.as_deposit_message().tree_hash_root(); + let deposit_data_root = deposit_data.tree_hash_root(); + + let DepositData { + pubkey, + withdrawal_credentials, + amount, + signature, + } = deposit_data; + + Ok(Self { + pubkey, + withdrawal_credentials, + amount, + signature, + fork_version: spec.genesis_fork_version, + network_name: spec + .config_name + .clone() + .ok_or("The network specification does not have a CONFIG_NAME set")?, + deposit_message_root, + deposit_data_root, + deposit_cli_version: LIGHTHOUSE_DEPOSIT_CLI_VERSION.to_string(), + }) + } +} + +macro_rules! without_0x_prefix { + ($mod_name: ident, $type: ty) => { + pub mod $mod_name { + use super::*; + use std::str::FromStr; + + struct Visitor; + + impl<'de> serde::de::Visitor<'de> for Visitor { + type Value = $type; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("ascii hex without a 0x prefix") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + <$type>::from_str(&format!("0x{}", v)).map_err(serde::de::Error::custom) + } + } + + /// Serialize with quotes. + pub fn serialize(value: &$type, serializer: S) -> Result + where + S: serde::Serializer, + { + let with_prefix = format!("{:?}", value); + let without_prefix = with_prefix + .strip_prefix("0x") + .ok_or_else(|| serde::ser::Error::custom("serialization is missing 0x"))?; + serializer.serialize_str(&without_prefix) + } + + /// Deserialize with quotes. 
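+ ///
+ /// e.g., a serialized value of `"deadbeef"` (illustrative hex) is accepted by
+ /// re-attaching the prefix and parsing it as `"0xdeadbeef"`.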
+ pub fn deserialize<'de, D>(deserializer: D) -> Result<$type, D::Error> + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_str(Visitor) + } + } + }; +} + +without_0x_prefix!(hash256_without_0x_prefix, Hash256); +without_0x_prefix!(signature_bytes_without_0x_prefix, SignatureBytes); +without_0x_prefix!(public_key_bytes_without_0x_prefix, PublicKeyBytes); + +mod bytes_4_without_0x_prefix { + use serde::de::Error; + + const BYTES_LEN: usize = 4; + + pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result + where + S: serde::Serializer, + { + let hex_string = &hex::encode(bytes); + serializer.serialize_str(hex_string) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> + where + D: serde::Deserializer<'de>, + { + let decoded = deserializer.deserialize_str(serde_utils::hex::HexVisitor)?; + + if decoded.len() != BYTES_LEN { + return Err(D::Error::custom(format!( + "expected {} bytes for array, got {}", + BYTES_LEN, + decoded.len() + ))); + } + + let mut array = [0; BYTES_LEN]; + array.copy_from_slice(&decoded); + Ok(array) + } +} + +pub async fn vc_http_client>( + url: SensitiveUrl, + token_path: P, +) -> Result<(ValidatorClientHttpClient, Vec), String> { + let token_path = token_path.as_ref(); + let token_bytes = + fs::read(token_path).map_err(|e| format!("Failed to read {:?}: {:?}", token_path, e))?; + let token_string = String::from_utf8(strip_off_newlines(token_bytes)) + .map_err(|e| format!("Failed to parse {:?} as utf8: {:?}", token_path, e))?; + let http_client = ValidatorClientHttpClient::new(url.clone(), token_string).map_err(|e| { + format!( + "Could not instantiate HTTP client from URL and secret: {:?}", + e + ) + })?; + + // Perform a request to check that the connection works + let remote_keystores = http_client + .get_keystores() + .await + .map_err(|e| format!("Failed to list keystores on VC: {:?}", e))? + .data; + + eprintln!( + "Validator client is reachable at {} and reports {} validators", + url, + remote_keystores.len() + ); + + Ok((http_client, remote_keystores)) +} + +/// Write some object to a file as JSON. +/// +/// The file must be created new, it must not already exist. 
+pub fn write_to_json_file, S: Serialize>( + path: P, + contents: &S, +) -> Result<(), String> { + eprintln!("Writing {:?}", path.as_ref()); + let mut file = fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(&path) + .map_err(|e| format!("Failed to open {:?}: {:?}", path.as_ref(), e))?; + serde_json::to_writer(&mut file, contents) + .map_err(|e| format!("Failed to write JSON to {:?}: {:?}", path.as_ref(), e)) +} diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs new file mode 100644 index 00000000000..8ea740ff5bb --- /dev/null +++ b/validator_manager/src/create_validators.rs @@ -0,0 +1,934 @@ +use super::common::*; +use crate::DumpConfig; +use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; +use clap::{App, Arg, ArgMatches}; +use eth2::{ + lighthouse_vc::std_types::KeystoreJsonStr, + types::{StateId, ValidatorId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; +use eth2_wallet::WalletBuilder; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::PathBuf; +use std::time::Duration; +use types::*; + +pub const CMD: &str = "create"; +pub const OUTPUT_PATH_FLAG: &str = "output-path"; +pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei"; +pub const DISABLE_DEPOSITS_FLAG: &str = "disable-deposits"; +pub const FIRST_INDEX_FLAG: &str = "first-index"; +pub const MNEMONIC_FLAG: &str = "mnemonic-path"; +pub const SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG: &str = "specify-voting-keystore-password"; +pub const ETH1_WITHDRAWAL_ADDRESS_FLAG: &str = "eth1-withdrawal-address"; +pub const GAS_LIMIT_FLAG: &str = "gas-limit"; +pub const FEE_RECIPIENT_FLAG: &str = "suggested-fee-recipient"; +pub const BUILDER_PROPOSALS_FLAG: &str = "builder-proposals"; +pub const BEACON_NODE_FLAG: &str = "beacon-node"; +pub const FORCE_BLS_WITHDRAWAL_CREDENTIALS: &str = "force-bls-withdrawal-credentials"; + +pub const VALIDATORS_FILENAME: &str = "validators.json"; +pub const DEPOSITS_FILENAME: &str = "deposits.json"; + +const BEACON_NODE_HTTP_TIMEOUT: Duration = Duration::from_secs(2); + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .about( + "Creates new validators from BIP-39 mnemonic. A JSON file will be created which \ + contains all the validator keystores and other validator data. This file can then \ + be imported to a validator client using the \"import-validators\" command. \ + Another, optional JSON file is created which contains a list of validator \ + deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", + ) + .arg( + Arg::with_name(OUTPUT_PATH_FLAG) + .long(OUTPUT_PATH_FLAG) + .value_name("DIRECTORY") + .help( + "The path to a directory where the validator and (optionally) deposits \ + files will be created. The directory will be created if it does not exist.", + ) + .required(true) + .takes_value(true), + ) + .arg( + Arg::with_name(DEPOSIT_GWEI_FLAG) + .long(DEPOSIT_GWEI_FLAG) + .value_name("DEPOSIT_GWEI") + .help( + "The GWEI value of the deposit amount. 
Defaults to the minimum amount \ + required for an active validator (MAX_EFFECTIVE_BALANCE)", + ) + .conflicts_with(DISABLE_DEPOSITS_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(FIRST_INDEX_FLAG) + .long(FIRST_INDEX_FLAG) + .value_name("FIRST_INDEX") + .help("The first of consecutive key indexes you wish to create.") + .takes_value(true) + .required(false) + .default_value("0"), + ) + .arg( + Arg::with_name(COUNT_FLAG) + .long(COUNT_FLAG) + .value_name("VALIDATOR_COUNT") + .help("The number of validators to create, regardless of how many already exist") + .conflicts_with("at-most") + .takes_value(true), + ) + .arg( + Arg::with_name(MNEMONIC_FLAG) + .long(MNEMONIC_FLAG) + .value_name("MNEMONIC_PATH") + .help("If present, the mnemonic will be read in from this file.") + .takes_value(true), + ) + .arg( + Arg::with_name(STDIN_INPUTS_FLAG) + .takes_value(false) + .hidden(cfg!(windows)) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), + ) + .arg( + Arg::with_name(DISABLE_DEPOSITS_FLAG) + .long(DISABLE_DEPOSITS_FLAG) + .help( + "When provided don't generate the deposits JSON file that is \ + commonly used for submitting validator deposits via a web UI. \ + Using this flag will save several seconds per validator if the \ + user has an alternate strategy for submitting deposits.", + ), + ) + .arg( + Arg::with_name(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) + .long(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) + .help( + "If present, the user will be prompted to enter the voting keystore \ + password that will be used to encrypt the voting keystores. If this \ + flag is not provided, a random password will be used. It is not \ + necessary to keep backups of voting keystore passwords if the \ + mnemonic is safely backed up.", + ), + ) + .arg( + Arg::with_name(ETH1_WITHDRAWAL_ADDRESS_FLAG) + .long(ETH1_WITHDRAWAL_ADDRESS_FLAG) + .value_name("ETH1_ADDRESS") + .help( + "If this field is set, the given eth1 address will be used to create the \ + withdrawal credentials. Otherwise, it will generate withdrawal credentials \ + with the mnemonic-derived withdrawal public key in EIP-2334 format.", + ) + .conflicts_with(DISABLE_DEPOSITS_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(GAS_LIMIT_FLAG) + .long(GAS_LIMIT_FLAG) + .value_name("UINT64") + .help( + "All created validators will use this gas limit. It is recommended \ + to leave this as the default value by not specifying this flag.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(FEE_RECIPIENT_FLAG) + .long(FEE_RECIPIENT_FLAG) + .value_name("ETH1_ADDRESS") + .help( + "All created validators will use this value for the suggested \ + fee recipient. Omit this flag to use the default value from the VC.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(BUILDER_PROPOSALS_FLAG) + .long(BUILDER_PROPOSALS_FLAG) + .help( + "When provided, all created validators will attempt to create \ + blocks via builder rather than the local EL.", + ) + .required(false) + .possible_values(&["true", "false"]) + .takes_value(true), + ) + .arg( + Arg::with_name(BEACON_NODE_FLAG) + .long(BEACON_NODE_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "A HTTP(S) address of a beacon node using the beacon-API. \ + If this value is provided, an error will be raised if any validator \ + key here is already known as a validator by that beacon node. 
This helps \ + prevent the same validator being created twice and therefore slashable \ + conditions.", + ) + .takes_value(true), + ) + .arg( + Arg::with_name(FORCE_BLS_WITHDRAWAL_CREDENTIALS) + .takes_value(false) + .long(FORCE_BLS_WITHDRAWAL_CREDENTIALS) + .help( + "If present, allows BLS withdrawal credentials rather than an execution \ + address. This is not recommended.", + ), + ) +} + +/// The CLI arguments are parsed into this struct before running the application. This step of +/// indirection allows for testing the underlying logic without needing to parse CLI arguments. +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct CreateConfig { + pub output_path: PathBuf, + pub first_index: u32, + pub count: u32, + pub deposit_gwei: u64, + pub mnemonic_path: Option, + pub stdin_inputs: bool, + pub disable_deposits: bool, + pub specify_voting_keystore_password: bool, + pub eth1_withdrawal_address: Option
, + pub builder_proposals: Option, + pub fee_recipient: Option
, + pub gas_limit: Option, + pub bn_url: Option, + pub force_bls_withdrawal_credentials: bool, +} + +impl CreateConfig { + fn from_cli(matches: &ArgMatches, spec: &ChainSpec) -> Result { + Ok(Self { + output_path: clap_utils::parse_required(matches, OUTPUT_PATH_FLAG)?, + deposit_gwei: clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)? + .unwrap_or(spec.max_effective_balance), + first_index: clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?, + count: clap_utils::parse_required(matches, COUNT_FLAG)?, + mnemonic_path: clap_utils::parse_optional(matches, MNEMONIC_FLAG)?, + stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), + disable_deposits: matches.is_present(DISABLE_DEPOSITS_FLAG), + specify_voting_keystore_password: matches + .is_present(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), + eth1_withdrawal_address: clap_utils::parse_optional( + matches, + ETH1_WITHDRAWAL_ADDRESS_FLAG, + )?, + builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS_FLAG)?, + fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, + gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, + bn_url: clap_utils::parse_optional(matches, BEACON_NODE_FLAG)?, + force_bls_withdrawal_credentials: matches.is_present(FORCE_BLS_WITHDRAWAL_CREDENTIALS), + }) + } +} + +struct ValidatorsAndDeposits { + validators: Vec, + deposits: Option>, +} + +impl ValidatorsAndDeposits { + async fn new<'a, T: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result { + let CreateConfig { + // The output path is handled upstream. + output_path: _, + first_index, + count, + deposit_gwei, + mnemonic_path, + stdin_inputs, + disable_deposits, + specify_voting_keystore_password, + eth1_withdrawal_address, + builder_proposals, + fee_recipient, + gas_limit, + bn_url, + force_bls_withdrawal_credentials, + } = config; + + // Since Capella, it really doesn't make much sense to use BLS + // withdrawal credentials. Try to guide users away from doing so. + if eth1_withdrawal_address.is_none() && !force_bls_withdrawal_credentials { + return Err(format!( + "--{ETH1_WITHDRAWAL_ADDRESS_FLAG} is required. See --help for more information." + )); + } + + if count == 0 { + return Err(format!("--{} cannot be 0", COUNT_FLAG)); + } + + let bn_http_client = if let Some(bn_url) = bn_url { + let bn_http_client = + BeaconNodeHttpClient::new(bn_url, Timeouts::set_all(BEACON_NODE_HTTP_TIMEOUT)); + + /* + * Print the version of the remote beacon node. + */ + let version = bn_http_client + .get_node_version() + .await + .map_err(|e| format!("Failed to test connection to beacon node: {:?}", e))? + .data + .version; + eprintln!("Connected to beacon node running version {}", version); + + /* + * Attempt to ensure that the beacon node is on the same network. + */ + let bn_config = bn_http_client + .get_config_spec::() + .await + .map_err(|e| format!("Failed to get spec from beacon node: {:?}", e))? 
+ .data;
+ if let Some(config_name) = &bn_config.config_name {
+ eprintln!("Beacon node is on the {} network", config_name)
+ }
+ let bn_spec = bn_config
+ .apply_to_chain_spec::<T>(&T::default_spec())
+ .ok_or("Beacon node appears to be on an incorrect network")?;
+ if bn_spec.genesis_fork_version != spec.genesis_fork_version {
+ if let Some(config_name) = bn_spec.config_name {
+ eprintln!("Beacon node is on the {} network", config_name)
+ }
+ return Err("Beacon node appears to be on the wrong network".to_string());
+ }
+
+ Some(bn_http_client)
+ } else {
+ None
+ };
+
+ let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?;
+ let voting_keystore_password = if specify_voting_keystore_password {
+ eprintln!("Please enter a voting keystore password when prompted.");
+ Some(read_password_from_user(stdin_inputs)?)
+ } else {
+ None
+ };
+
+ /*
+ * Generate a wallet to be used for HD key generation.
+ */
+
+ // A random password is always appropriate for the wallet since it is ephemeral.
+ let wallet_password = random_password_string();
+ // A random password is always appropriate for the withdrawal keystore since we don't ever store
+ // it anywhere.
+ let withdrawal_keystore_password = random_password_string();
+ let mut wallet =
+ WalletBuilder::from_mnemonic(&mnemonic, wallet_password.as_ref(), "".to_string())
+ .map_err(|e| format!("Unable to create seed from mnemonic: {:?}", e))?
+ .build()
+ .map_err(|e| format!("Unable to create wallet: {:?}", e))?;
+
+ /*
+ * Start deriving individual validators.
+ */
+
+ eprintln!(
+ "Starting derivation of {} keystores. Each keystore may take several seconds.",
+ count
+ );
+
+ let mut validators = Vec::with_capacity(count as usize);
+ let mut deposits = (!disable_deposits).then(Vec::new);
+
+ for (i, derivation_index) in (first_index..first_index + count).enumerate() {
+ // If the voting keystore password was not provided by the user then use a unique random
+ // string for each validator.
+ let voting_keystore_password = voting_keystore_password
+ .clone()
+ .unwrap_or_else(random_password_string);
+
+ // Set the wallet to the appropriate derivation index.
+ wallet
+ .set_nextaccount(derivation_index)
+ .map_err(|e| format!("Failure to set validator derivation index: {:?}", e))?;
+
+ // Derive the keystore from the HD wallet.
+ let keystores = wallet
+ .next_validator(
+ wallet_password.as_ref(),
+ voting_keystore_password.as_ref(),
+ withdrawal_keystore_password.as_ref(),
+ )
+ .map_err(|e| format!("Failed to derive keystore {}: {:?}", i, e))?;
+ let voting_keystore = keystores.voting;
+ let voting_public_key = voting_keystore
+ .public_key()
+ .ok_or_else(|| {
+ format!("Validator keystore at index {} is missing a public key", i)
+ })?
+ .into();
+
+ // If the user has provided a beacon node URL, check that the validator doesn't already
+ // exist in the beacon chain.
+ if let Some(bn_http_client) = &bn_http_client {
+ match bn_http_client
+ .get_beacon_states_validator_id(
+ StateId::Head,
+ &ValidatorId::PublicKey(voting_public_key),
+ )
+ .await
+ {
+ Ok(Some(_)) => {
+ return Err(format!(
+ "Validator {:?} at derivation index {} already exists in the beacon chain. \
+ This indicates a slashing risk; be sure to never run the same validator on two \
+ different validator clients. If you understand the risks and are certain you \
+ wish to generate this validator again, omit the --{} flag.",
+ voting_public_key, derivation_index, BEACON_NODE_FLAG
+ ))?
+ } + Ok(None) => eprintln!( + "{:?} was not found in the beacon chain", + voting_public_key + ), + Err(e) => { + return Err(format!( + "Error checking if validator exists in beacon chain: {:?}", + e + )) + } + } + } + + if let Some(deposits) = &mut deposits { + // Decrypt the voting keystore so a deposit message can be signed. + let voting_keypair = voting_keystore + .decrypt_keypair(voting_keystore_password.as_ref()) + .map_err(|e| format!("Failed to decrypt voting keystore {}: {:?}", i, e))?; + + // Sanity check to ensure the keystore is reporting the correct public key. + if PublicKeyBytes::from(voting_keypair.pk.clone()) != voting_public_key { + return Err(format!( + "Mismatch for keystore public key and derived public key \ + for derivation index {}", + derivation_index + )); + } + + let withdrawal_credentials = + if let Some(eth1_withdrawal_address) = eth1_withdrawal_address { + WithdrawalCredentials::eth1(eth1_withdrawal_address, spec) + } else { + // Decrypt the withdrawal keystore so withdrawal credentials can be created. It's + // not strictly necessary to decrypt the keystore since we can read the pubkey + // directly from the keystore. However we decrypt the keystore to be more certain + // that we have access to the withdrawal keys. + let withdrawal_keypair = keystores + .withdrawal + .decrypt_keypair(withdrawal_keystore_password.as_ref()) + .map_err(|e| { + format!("Failed to decrypt withdrawal keystore {}: {:?}", i, e) + })?; + WithdrawalCredentials::bls(&withdrawal_keypair.pk, spec) + }; + + // Create a JSON structure equivalent to the one generated by + // `ethereum/staking-deposit-cli`. + let json_deposit = StandardDepositDataJson::new( + &voting_keypair, + withdrawal_credentials.into(), + deposit_gwei, + spec, + )?; + + deposits.push(json_deposit); + } + + let validator = ValidatorSpecification { + voting_keystore: KeystoreJsonStr(voting_keystore), + voting_keystore_password: voting_keystore_password.clone(), + // New validators have no slashing protection history. + slashing_protection: None, + fee_recipient, + gas_limit, + builder_proposals, + // Allow the VC to choose a default "enabled" state. Since "enabled" is not part of + // the standard API, leaving this as `None` means we are not forced to use the + // non-standard API. + enabled: None, + }; + + eprintln!( + "Completed {}/{}: {:?}", + i.saturating_add(1), + count, + voting_public_key + ); + + validators.push(validator); + } + + Ok(Self { + validators, + deposits, + }) + } +} + +pub async fn cli_run<'a, T: EthSpec>( + matches: &'a ArgMatches<'a>, + spec: &ChainSpec, + dump_config: DumpConfig, +) -> Result<(), String> { + let config = CreateConfig::from_cli(matches, spec)?; + if dump_config.should_exit_early(&config)? 
{ + Ok(()) + } else { + run::(config, spec).await + } +} + +async fn run<'a, T: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> { + let output_path = config.output_path.clone(); + + if !output_path.exists() { + fs::create_dir(&output_path) + .map_err(|e| format!("Failed to create {:?} directory: {:?}", output_path, e))?; + } else if !output_path.is_dir() { + return Err(format!("{:?} must be a directory", output_path)); + } + + let validators_path = output_path.join(VALIDATORS_FILENAME); + if validators_path.exists() { + return Err(format!( + "{:?} already exists, refusing to overwrite", + validators_path + )); + } + let deposits_path = output_path.join(DEPOSITS_FILENAME); + if deposits_path.exists() { + return Err(format!( + "{:?} already exists, refusing to overwrite", + deposits_path + )); + } + + let validators_and_deposits = ValidatorsAndDeposits::new::(config, spec).await?; + + eprintln!("Keystore generation complete"); + + write_to_json_file(&validators_path, &validators_and_deposits.validators)?; + + if let Some(deposits) = &validators_and_deposits.deposits { + write_to_json_file(&deposits_path, deposits)?; + } + + Ok(()) +} + +// The tests use crypto and are too slow in debug. +#[cfg(not(debug_assertions))] +#[cfg(test)] +pub mod tests { + use super::*; + use eth2_network_config::Eth2NetworkConfig; + use regex::Regex; + use std::path::Path; + use std::str::FromStr; + use tempfile::{tempdir, TempDir}; + use tree_hash::TreeHash; + + type E = MainnetEthSpec; + + const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.3.0"; + + fn junk_execution_address() -> Option
{ + Some(Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap()) + } + + pub struct TestBuilder { + spec: ChainSpec, + output_dir: TempDir, + mnemonic_dir: TempDir, + config: CreateConfig, + } + + impl Default for TestBuilder { + fn default() -> Self { + Self::new(E::default_spec()) + } + } + + impl TestBuilder { + pub fn new(spec: ChainSpec) -> Self { + let output_dir = tempdir().unwrap(); + let mnemonic_dir = tempdir().unwrap(); + let mnemonic_path = mnemonic_dir.path().join("mnemonic"); + fs::write( + &mnemonic_path, + "test test test test test test test test test test test waste", + ) + .unwrap(); + + let config = CreateConfig { + output_path: output_dir.path().into(), + first_index: 0, + count: 1, + deposit_gwei: spec.max_effective_balance, + mnemonic_path: Some(mnemonic_path), + stdin_inputs: false, + disable_deposits: false, + specify_voting_keystore_password: false, + eth1_withdrawal_address: junk_execution_address(), + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + bn_url: None, + force_bls_withdrawal_credentials: false, + }; + + Self { + spec, + output_dir, + mnemonic_dir, + config, + } + } + + pub fn mutate_config(mut self, func: F) -> Self { + func(&mut self.config); + self + } + + pub async fn run_test(self) -> TestResult { + let Self { + spec, + output_dir, + mnemonic_dir, + config, + } = self; + + let result = run::(config.clone(), &spec).await; + + if result.is_ok() { + let validators_file_contents = + fs::read_to_string(output_dir.path().join(VALIDATORS_FILENAME)).unwrap(); + let validators: Vec = + serde_json::from_str(&validators_file_contents).unwrap(); + + assert_eq!(validators.len(), config.count as usize); + + for (i, validator) in validators.iter().enumerate() { + let voting_keystore = &validator.voting_keystore.0; + let keypair = voting_keystore + .decrypt_keypair(validator.voting_keystore_password.as_ref()) + .unwrap(); + assert_eq!(keypair.pk, voting_keystore.public_key().unwrap()); + assert_eq!( + voting_keystore.path().unwrap(), + format!("m/12381/3600/{}/0/0", config.first_index as usize + i) + ); + assert!(validator.slashing_protection.is_none()); + assert_eq!(validator.fee_recipient, config.fee_recipient); + assert_eq!(validator.gas_limit, config.gas_limit); + assert_eq!(validator.builder_proposals, config.builder_proposals); + assert_eq!(validator.enabled, None); + } + + let deposits_path = output_dir.path().join(DEPOSITS_FILENAME); + if config.disable_deposits { + assert!(!deposits_path.exists()); + } else { + let deposits_file_contents = fs::read_to_string(&deposits_path).unwrap(); + let deposits: Vec = + serde_json::from_str(&deposits_file_contents).unwrap(); + + assert_eq!(deposits.len(), config.count as usize); + + for (validator, deposit) in validators.iter().zip(deposits.iter()) { + let validator_pubkey = validator.voting_keystore.0.public_key().unwrap(); + assert_eq!(deposit.pubkey, validator_pubkey.clone().into()); + if let Some(address) = config.eth1_withdrawal_address { + assert_eq!( + deposit.withdrawal_credentials.as_bytes()[0], + spec.eth1_address_withdrawal_prefix_byte + ); + assert_eq!( + &deposit.withdrawal_credentials.as_bytes()[12..], + address.as_bytes() + ); + } else { + assert_eq!( + deposit.withdrawal_credentials.as_bytes()[0], + spec.bls_withdrawal_prefix_byte + ); + } + assert_eq!(deposit.amount, config.deposit_gwei); + let deposit_message = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: 
SignatureBytes::empty(), + } + .as_deposit_message(); + assert!(deposit.signature.decompress().unwrap().verify( + &validator_pubkey, + deposit_message.signing_root(spec.get_deposit_domain()) + )); + assert_eq!(deposit.fork_version, spec.genesis_fork_version); + assert_eq!(&deposit.network_name, spec.config_name.as_ref().unwrap()); + assert_eq!( + deposit.deposit_message_root, + deposit_message.tree_hash_root() + ); + assert_eq!( + deposit.deposit_data_root, + DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature.clone() + } + .tree_hash_root() + ); + } + } + } + + // The directory containing the mnemonic can now be removed. + drop(mnemonic_dir); + + TestResult { result, output_dir } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. + pub struct TestResult { + pub result: Result<(), String>, + pub output_dir: TempDir, + } + + impl TestResult { + pub fn validators_file_path(&self) -> PathBuf { + self.output_dir.path().join(VALIDATORS_FILENAME) + } + + pub fn validators(&self) -> Vec { + let contents = fs::read_to_string(self.validators_file_path()).unwrap(); + serde_json::from_str(&contents).unwrap() + } + + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + + fn assert_err(self) { + assert!(self.result.is_err()) + } + } + + #[tokio::test] + async fn default_test_values() { + TestBuilder::default().run_test().await.assert_ok(); + } + + #[tokio::test] + async fn no_eth1_address_without_force() { + TestBuilder::default() + .mutate_config(|config| { + config.eth1_withdrawal_address = None; + config.force_bls_withdrawal_credentials = false + }) + .run_test() + .await + .assert_err(); + } + + #[tokio::test] + async fn bls_withdrawal_credentials() { + TestBuilder::default() + .mutate_config(|config| { + config.eth1_withdrawal_address = None; + config.force_bls_withdrawal_credentials = true + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn default_test_values_deposits_disabled() { + TestBuilder::default() + .mutate_config(|config| config.disable_deposits = true) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn count_is_zero() { + TestBuilder::default() + .mutate_config(|config| config.count = 0) + .run_test() + .await + .assert_err(); + } + + #[tokio::test] + async fn eth1_withdrawal_addresses() { + TestBuilder::default() + .mutate_config(|config| { + config.count = 2; + config.eth1_withdrawal_address = junk_execution_address(); + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn non_zero_first_index() { + TestBuilder::default() + .mutate_config(|config| { + config.first_index = 2; + config.count = 2; + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn misc_modifications() { + TestBuilder::default() + .mutate_config(|config| { + config.deposit_gwei = 42; + config.builder_proposals = Some(true); + config.gas_limit = Some(1337); + }) + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn bogus_bn_url() { + TestBuilder::default() + .mutate_config(|config| { + config.bn_url = + Some(SensitiveUrl::from_str("http://sdjfvwfhsdhfschwkeyfwhwlga.com").unwrap()); + }) + .run_test() + .await + .assert_err(); + } + + #[tokio::test] + async fn staking_deposit_cli_vectors() { + let vectors_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("test_vectors") + .join("vectors"); + for entry in fs::read_dir(vectors_dir).unwrap() { + let entry = 
entry.unwrap(); + let file_name = entry.file_name(); + let vector_name = file_name.to_str().unwrap(); + let path = entry.path(); + // Leave this `println!` so we can tell which test fails. + println!("Running test {}", vector_name); + run_test_vector(vector_name, &path).await; + } + } + + async fn run_test_vector>(name: &str, vectors_path: P) { + /* + * Parse the test vector name into a set of test parameters. + */ + let re = Regex::new(r"(.*)_(.*)_(.*)_(.*)_(.*)_(.*)_(.*)").unwrap(); + let capture = re.captures_iter(name).next().unwrap(); + let network = capture.get(1).unwrap().as_str(); + let first = u32::from_str(capture.get(3).unwrap().as_str()).unwrap(); + let count = u32::from_str(capture.get(5).unwrap().as_str()).unwrap(); + let uses_eth1 = bool::from_str(capture.get(7).unwrap().as_str()).unwrap(); + + /* + * Use the test parameters to generate equivalent files "locally" (i.e., with our code). + */ + + let spec = Eth2NetworkConfig::constant(network) + .unwrap() + .unwrap() + .chain_spec::() + .unwrap(); + + let test_result = TestBuilder::new(spec) + .mutate_config(|config| { + config.first_index = first; + config.count = count; + if uses_eth1 { + config.eth1_withdrawal_address = Some( + Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap(), + ); + } else { + config.eth1_withdrawal_address = None; + config.force_bls_withdrawal_credentials = true; + } + }) + .run_test() + .await; + let TestResult { result, output_dir } = test_result; + result.expect("local generation should succeed"); + + /* + * Ensure the deposit data is identical when parsed as JSON. + */ + + let local_deposits = { + let path = output_dir.path().join(DEPOSITS_FILENAME); + let contents = fs::read_to_string(&path).unwrap(); + let mut deposits: Vec = + serde_json::from_str(&contents).unwrap(); + for deposit in &mut deposits { + // Ensures we can match test vectors. + deposit.deposit_cli_version = TEST_VECTOR_DEPOSIT_CLI_VERSION.to_string(); + + // We use "prater" and the vectors use "goerli" now. The two names refer to the same + // network so there should be no issue here. + if deposit.network_name == "prater" { + deposit.network_name = "goerli".to_string(); + } + } + deposits + }; + let vector_deposits: Vec = { + let path = fs::read_dir(vectors_path.as_ref().join("validator_keys")) + .unwrap() + .find_map(|entry| { + let entry = entry.unwrap(); + let file_name = entry.file_name(); + if file_name.to_str().unwrap().starts_with("deposit_data") { + Some(entry.path()) + } else { + None + } + }) + .unwrap(); + let contents = fs::read_to_string(path).unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + assert_eq!(local_deposits, vector_deposits); + + /* + * Note: we don't check the keystores generated by the deposit-cli since there is little + * value in this. + * + * If we check the deposits then we are verifying the signature across the deposit message. + * This implicitly verifies that the keypair generated by the deposit-cli is identical to + * the one created by Lighthouse. 
+ */ + } +} diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs new file mode 100644 index 00000000000..4b924189f20 --- /dev/null +++ b/validator_manager/src/import_validators.rs @@ -0,0 +1,436 @@ +use super::common::*; +use crate::DumpConfig; +use clap::{App, Arg, ArgMatches}; +use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::PathBuf; + +pub const CMD: &str = "import"; +pub const VALIDATORS_FILE_FLAG: &str = "validators-file"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; + +pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .about( + "Uploads validators to a validator client using the HTTP API. The validators \ + are defined in a JSON file which can be generated using the \"create-validators\" \ + command.", + ) + .arg( + Arg::with_name(VALIDATORS_FILE_FLAG) + .long(VALIDATORS_FILE_FLAG) + .value_name("PATH_TO_JSON_FILE") + .help( + "The path to a JSON file containing a list of validators to be \ + imported to the validator client. This file is usually named \ + \"validators.json\".", + ) + .required(true) + .takes_value(true), + ) + .arg( + Arg::with_name(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "An HTTP(S) address of a validator client using the keymanager-API. \ + If this value is not supplied then a 'dry run' will be conducted where \ + no changes are made to the validator client.", + ) + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .takes_value(true), + ) + .arg( + Arg::with_name(IGNORE_DUPLICATES_FLAG) + .takes_value(false) + .long(IGNORE_DUPLICATES_FLAG) + .help( + "If present, ignore any validators which already exist on the VC. \ + Without this flag, the process will terminate without making any changes. \ + This flag should be used with caution; whilst it does not directly cause \ + slashable conditions, it might be an indicator that something is amiss. \ + Users should also be careful to avoid submitting duplicate deposits for \ + validators that already exist on the VC.", + ), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct ImportConfig { + pub validators_file_path: PathBuf, + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, + pub ignore_duplicates: bool, +} + +impl ImportConfig { + fn from_cli(matches: &ArgMatches) -> Result<Self, String> { + Ok(Self { + validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + ignore_duplicates: matches.is_present(IGNORE_DUPLICATES_FLAG), + }) + } +} + +pub async fn cli_run<'a>( + matches: &'a ArgMatches<'a>, + dump_config: DumpConfig, +) -> Result<(), String> { + let config = ImportConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)?
{ + Ok(()) + } else { + run(config).await + } +} + +async fn run(config: ImportConfig) -> Result<(), String> { + let ImportConfig { + validators_file_path, + vc_url, + vc_token_path, + ignore_duplicates, + } = config; + + if !validators_file_path.exists() { + return Err(format!("Unable to find file at {:?}", validators_file_path)); + } + + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(&validators_file_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e))?; + let validators: Vec<ValidatorSpecification> = serde_json::from_reader(&validators_file) + .map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_file_path, e + ) + })?; + + let count = validators.len(); + + let (http_client, _keystores) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + eprintln!( + "Starting to submit {} validators to the VC; each validator may take several seconds", + count + ); + + for (i, validator) in validators.into_iter().enumerate() { + match validator.upload(&http_client, ignore_duplicates).await { + Ok(status) => { + match status.status { + ImportKeystoreStatus::Imported => { + eprintln!("Uploaded keystore {} of {} to the VC", i + 1, count) + } + ImportKeystoreStatus::Duplicate => { + if ignore_duplicates { + eprintln!("Re-uploaded keystore {} of {} to the VC", i + 1, count) + } else { + eprintln!( + "Keystore {} of {} was a duplicate of one already on the VC. \ + Exiting now; use --{} to allow duplicates.", + i + 1, count, IGNORE_DUPLICATES_FLAG + ); + return Err(DETECTED_DUPLICATE_MESSAGE.to_string()); + } + } + ImportKeystoreStatus::Error => { + eprintln!( + "Upload of keystore {} of {} failed with message: {:?}. \ + A potential solution is to run this command again \ + using the --{} flag, however care should be taken to ensure \ + that there are no duplicate deposits submitted.", + i + 1, + count, + status.message, + IGNORE_DUPLICATES_FLAG + ); + return Err(format!("Upload failed with {:?}", status.message)); + } + } + } + e @ Err(UploadError::InvalidPublicKey) => { + eprintln!("Validator {} has an invalid public key", i); + return Err(format!("{:?}", e)); + } + ref e @ Err(UploadError::DuplicateValidator(voting_public_key)) => { + eprintln!( + "Duplicate validator {:?} already exists on the destination validator client. \ + This may indicate that some validators are running in two places at once, which \ + can lead to slashing. If you are certain that there is no risk, add the --{} flag.", + voting_public_key, IGNORE_DUPLICATES_FLAG + ); + return Err(format!("{:?}", e)); + } + Err(UploadError::FailedToListKeys(e)) => { + eprintln!( + "Failed to list keystores. Some keys may have been imported whilst \ + others may not have been imported. A potential solution is to run this command again \ + using the --{} flag, however care should be taken to ensure that there are no \ + duplicate deposits submitted.", + IGNORE_DUPLICATES_FLAG + ); + return Err(format!("{:?}", e)); + } + Err(UploadError::KeyUploadFailed(e)) => { + eprintln!( + "Failed to upload keystore. Some keys may have been imported whilst \ + others may not have been imported. A potential solution is to run this command again \ + using the --{} flag, however care should be taken to ensure that there are no \ + duplicate deposits submitted.", + IGNORE_DUPLICATES_FLAG + ); + return Err(format!("{:?}", e)); + } + Err(UploadError::IncorrectStatusCount(count)) => { + eprintln!( + "Keystore was uploaded, however the validator client returned an invalid response.
\ + A potential solution is to run this command again using the --{} flag, however care \ + should be taken to ensure that there are no duplicate deposits submitted.", + IGNORE_DUPLICATES_FLAG + ); + return Err(format!( + "Invalid status count in import response: {}", + count + )); + } + Err(UploadError::FeeRecipientUpdateFailed(e)) => { + eprintln!( + "Failed to set fee recipient for validator {}. This value may need \ + to be set manually. Continuing with other validators. Error was {:?}", + i, e + ); + } + Err(UploadError::PatchValidatorFailed(e)) => { + eprintln!( + "Failed to set some values on validator {} (e.g., builder, enabled or gas limit). \ + These values may need to be set manually. Continuing with other validators. \ + Error was {:?}", + i, e + ); + } + } + } + + Ok(()) +} + +// The tests use crypto and are too slow in debug. +#[cfg(not(debug_assertions))] +#[cfg(test)] +pub mod tests { + use super::*; + use crate::create_validators::tests::TestBuilder as CreateTestBuilder; + use std::fs; + use tempfile::{tempdir, TempDir}; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + const VC_TOKEN_FILE_NAME: &str = "vc_token.json"; + + pub struct TestBuilder { + import_config: ImportConfig, + pub vc: ApiTester, + /// Holds the temp directory owned by the `CreateTestBuilder` so it doesn't get cleaned up + /// before we can read it. + create_dir: Option<TempDir>, + _dir: TempDir, + } + + impl TestBuilder { + pub async fn new() -> Self { + Self::new_with_http_config(ApiTester::default_http_config()).await + } + + pub async fn new_with_http_config(http_config: HttpConfig) -> Self { + let dir = tempdir().unwrap(); + let vc = ApiTester::new_with_http_config(http_config).await; + let vc_token_path = dir.path().join(VC_TOKEN_FILE_NAME); + fs::write(&vc_token_path, &vc.api_token).unwrap(); + + Self { + import_config: ImportConfig { + // This field will be overwritten later on. + validators_file_path: dir.path().into(), + vc_url: vc.url.clone(), + vc_token_path, + ignore_duplicates: false, + }, + vc, + create_dir: None, + _dir: dir, + } + } + + pub fn mutate_import_config<F: Fn(&mut ImportConfig)>(mut self, func: F) -> Self { + func(&mut self.import_config); + self + } + + pub async fn create_validators(mut self, count: u32, first_index: u32) -> Self { + let create_result = CreateTestBuilder::default() + .mutate_config(|config| { + config.count = count; + config.first_index = first_index; + }) + .run_test() + .await; + assert!( + create_result.result.is_ok(), + "precondition: validators are created" + ); + self.import_config.validators_file_path = create_result.validators_file_path(); + self.create_dir = Some(create_result.output_dir); + self + } + + /// Imports validators without running the entire test suite in `Self::run_test`. This is + /// useful for simulating duplicate imports.
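+ /// + /// A rough sketch of the intended usage, mirroring the duplicate-import tests below: + /// + /// ```ignore + /// TestBuilder::new() + /// .await + /// .create_validators(1, 0) + /// .await + /// .import_validators_without_checks() + /// .await + /// .run_test() + /// .await; + /// ```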
+ pub async fn import_validators_without_checks(self) -> Self { + run(self.import_config.clone()).await.unwrap(); + self + } + + pub async fn run_test(self) -> TestResult { + let result = run(self.import_config.clone()).await; + + if result.is_ok() { + self.vc.ensure_key_cache_consistency().await; + + let local_validators: Vec<ValidatorSpecification> = { + let contents = + fs::read_to_string(&self.import_config.validators_file_path).unwrap(); + serde_json::from_str(&contents).unwrap() + }; + let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; + + assert_eq!( + local_validators.len(), + list_keystores_response.len(), + "vc should have exactly the number of validators imported" + ); + + for local_validator in &local_validators { + let local_keystore = &local_validator.voting_keystore.0; + let local_pubkey = local_keystore.public_key().unwrap().into(); + let remote_validator = list_keystores_response + .iter() + .find(|validator| validator.validating_pubkey == local_pubkey) + .expect("validator must exist on VC"); + assert_eq!(&remote_validator.derivation_path, &local_keystore.path()); + assert_eq!(remote_validator.readonly, Some(false)); + } + } + + TestResult { + result, + vc: self.vc, + } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. + pub struct TestResult { + pub result: Result<(), String>, + pub vc: ApiTester, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + + fn assert_err_contains(self, msg: &str) { + assert!(self.result.unwrap_err().contains(msg)) + } + } + + #[tokio::test] + async fn create_one_validator() { + TestBuilder::new() + .await + .create_validators(1, 0) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_three_validators() { + TestBuilder::new() + .await + .create_validators(3, 0) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_one_validator_with_offset() { + TestBuilder::new() + .await + .create_validators(1, 42) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_three_validators_with_offset() { + TestBuilder::new() + .await + .create_validators(3, 1337) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn import_duplicates_when_disallowed() { + TestBuilder::new() + .await + .create_validators(1, 0) + .await + .import_validators_without_checks() + .await + .run_test() + .await + .assert_err_contains("DuplicateValidator"); + } + + #[tokio::test] + async fn import_duplicates_when_allowed() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.ignore_duplicates = true; + }) + .create_validators(1, 0) + .await + .import_validators_without_checks() + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs new file mode 100644 index 00000000000..6889ee79d2c --- /dev/null +++ b/validator_manager/src/lib.rs @@ -0,0 +1,85 @@ +use clap::App; +use clap::ArgMatches; +use common::write_to_json_file; +use environment::Environment; +use serde::Serialize; +use std::path::PathBuf; +use types::EthSpec; + +pub mod common; +pub mod create_validators; +pub mod import_validators; +pub mod move_validators; + +pub const CMD: &str = "validator_manager"; + +/// This flag is on the top-level `lighthouse` binary. +const DUMP_CONFIGS_FLAG: &str = "dump-config"; + +/// Used only in testing, this allows a command to dump its configuration to a file and then exit +/// successfully.
This allows for testing how the CLI arguments translate to some configuration. +pub enum DumpConfig { + Disabled, + Enabled(PathBuf), +} + +impl DumpConfig { + /// Returns `Ok(true)` if the configuration was successfully written to a file and the + /// application should exit successfully without doing anything else. + pub fn should_exit_early<T: Serialize>(&self, config: &T) -> Result<bool, String> { + match self { + DumpConfig::Disabled => Ok(false), + DumpConfig::Enabled(dump_path) => { + write_to_json_file(dump_path, config)?; + Ok(true) + } + } + } +} + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .visible_aliases(&["vm", "validator-manager", CMD]) + .about("Utilities for managing a Lighthouse validator client via the HTTP API.") + .subcommand(create_validators::cli_app()) + .subcommand(import_validators::cli_app()) + .subcommand(move_validators::cli_app()) +} + +/// Run the validator manager, returning an error if the operation did not succeed. +pub fn run<'a, T: EthSpec>(matches: &'a ArgMatches<'a>, env: Environment<T>) -> Result<(), String> { + let context = env.core_context(); + let spec = context.eth2_config.spec; + let dump_config = clap_utils::parse_optional(matches, DUMP_CONFIGS_FLAG)? + .map(DumpConfig::Enabled) + .unwrap_or_else(|| DumpConfig::Disabled); + + context + .executor + // This `block_on_dangerous` call is reasonable since it is at the very highest level of the + // application, the rest of which is all async. All other functions below this should be + // async and should never call `block_on_dangerous` themselves. + .block_on_dangerous( + async { + match matches.subcommand() { + (create_validators::CMD, Some(matches)) => { + create_validators::cli_run::<T>(matches, &spec, dump_config).await + } + (import_validators::CMD, Some(matches)) => { + import_validators::cli_run(matches, dump_config).await + } + (move_validators::CMD, Some(matches)) => { + move_validators::cli_run(matches, dump_config).await + } + ("", _) => Err("No command supplied. See --help.".to_string()), + (unknown, _) => Err(format!( + "{} is not a valid {} command. See --help.", + unknown, CMD + )), + } + }, + "validator_manager", + ) + .ok_or("Shutting down")?
+} diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs new file mode 100644 index 00000000000..fa886e8f941 --- /dev/null +++ b/validator_manager/src/move_validators.rs @@ -0,0 +1,1253 @@ +use super::common::*; +use crate::DumpConfig; +use account_utils::{read_password_from_user, ZeroizeString}; +use clap::{App, Arg, ArgMatches}; +use eth2::{ + lighthouse_vc::{ + std_types::{ + DeleteKeystoreStatus, DeleteKeystoresRequest, ImportKeystoreStatus, InterchangeJsonStr, + Status, + }, + types::{ExportKeystoresResponse, SingleExportKeystoresResponse}, + }, + SensitiveUrl, +}; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::str::FromStr; +use std::time::Duration; +use tokio::time::sleep; +use types::{Address, PublicKeyBytes}; + +pub const MOVE_DIR_NAME: &str = "lighthouse-validator-move"; +pub const VALIDATOR_SPECIFICATION_FILE: &str = "validator-specification.json"; + +pub const CMD: &str = "move"; +pub const SRC_VC_URL_FLAG: &str = "src-vc-url"; +pub const SRC_VC_TOKEN_FLAG: &str = "src-vc-token"; +pub const DEST_VC_URL_FLAG: &str = "dest-vc-url"; +pub const DEST_VC_TOKEN_FLAG: &str = "dest-vc-token"; +pub const VALIDATORS_FLAG: &str = "validators"; +pub const GAS_LIMIT_FLAG: &str = "gas-limit"; +pub const FEE_RECIPIENT_FLAG: &str = "suggested-fee-recipient"; +pub const BUILDER_PROPOSALS_FLAG: &str = "builder-proposals"; + +const NO_VALIDATORS_MSG: &str = "No validators present on source validator client"; + +const UPLOAD_RETRY_WAIT: Duration = Duration::from_secs(5); + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub enum PasswordSource { + /// Reads the password from the user via the terminal. + Interactive { stdin_inputs: bool }, + /// This variant is panic-y and should only be used during testing. + Testing(HashMap<PublicKeyBytes, Vec<String>>), +} + +impl PasswordSource { + fn read_password(&mut self, pubkey: &PublicKeyBytes) -> Result<ZeroizeString, String> { + match self { + PasswordSource::Interactive { stdin_inputs } => { + eprintln!("Please enter a password for keystore {:?}:", pubkey); + read_password_from_user(*stdin_inputs) + } + // This path will panic if the password list is empty. Since the + // password prompt will just keep retrying on a failed password, the + // panic helps us break the loop if we misconfigure the test. + PasswordSource::Testing(passwords) => Ok(passwords + .get_mut(pubkey) + .expect("pubkey should be known") + .remove(0) + .into()), + } + } +} + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .about( + "Moves validators from one validator client to another using the HTTP API. \ + This command only supports validators signing via a keystore on the local \ + file system (i.e., not Web3Signer validators).", + ) + .arg( + Arg::with_name(SRC_VC_URL_FLAG) + .long(SRC_VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "An HTTP(S) address of a validator client using the keymanager-API.
\ + This validator client is the \"source\" and contains the validators \ + that are to be moved.", + ) + .required(true) + .requires(SRC_VC_TOKEN_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(SRC_VC_TOKEN_FLAG) + .long(SRC_VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the source validator client.") + .takes_value(true), + ) + .arg( + Arg::with_name(DEST_VC_URL_FLAG) + .long(DEST_VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help( + "An HTTP(S) address of a validator client using the keymanager-API. \ + This validator client is the \"destination\" and will have new validators \ + added as they are removed from the \"source\" validator client.", + ) + .required(true) + .requires(DEST_VC_TOKEN_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(DEST_VC_TOKEN_FLAG) + .long(DEST_VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the destination validator client.") + .takes_value(true), + ) + .arg( + Arg::with_name(VALIDATORS_FLAG) + .long(VALIDATORS_FLAG) + .value_name("STRING") + .help( + "The validators to be moved. Either a list of 0x-prefixed \ + validator pubkeys or the keyword \"all\".", + ) + .takes_value(true), + ) + .arg( + Arg::with_name(COUNT_FLAG) + .long(COUNT_FLAG) + .value_name("VALIDATOR_COUNT") + .help("The number of validators to move.") + .conflicts_with(VALIDATORS_FLAG) + .takes_value(true), + ) + .arg( + Arg::with_name(GAS_LIMIT_FLAG) + .long(GAS_LIMIT_FLAG) + .value_name("UINT64") + .help( + "All moved validators will use this gas limit. It is recommended \ + to leave this as the default value by not specifying this flag.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(FEE_RECIPIENT_FLAG) + .long(FEE_RECIPIENT_FLAG) + .value_name("ETH1_ADDRESS") + .help( + "All moved validators will use this value for the suggested \ + fee recipient. Omit this flag to use the default value from the VC.", + ) + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name(BUILDER_PROPOSALS_FLAG) + .long(BUILDER_PROPOSALS_FLAG) + .help( + "When provided, all moved validators will attempt to create \ + blocks via builder rather than the local EL.", + ) + .required(false) + .possible_values(&["true", "false"]) + .takes_value(true), + ) + .arg( + Arg::with_name(STDIN_INPUTS_FLAG) + .takes_value(false) + .hidden(cfg!(windows)) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub enum Validators { + All, + Count(usize), + Specific(Vec<PublicKeyBytes>), +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct MoveConfig { + pub src_vc_url: SensitiveUrl, + pub src_vc_token_path: PathBuf, + pub dest_vc_url: SensitiveUrl, + pub dest_vc_token_path: PathBuf, + pub validators: Validators, + pub builder_proposals: Option<bool>, + pub fee_recipient: Option<Address>, + pub gas_limit: Option<u64>, + pub password_source: PasswordSource, +} + +impl MoveConfig { + fn from_cli(matches: &ArgMatches) -> Result<Self, String> { + let count_flag = clap_utils::parse_optional(matches, COUNT_FLAG)?; + let validators_flag = matches.value_of(VALIDATORS_FLAG); + let validators = match (count_flag, validators_flag) { + (Some(count), None) => Validators::Count(count), + (None, Some(string)) => match string { + "all" => Validators::All, + pubkeys => pubkeys + .split(',') + .map(PublicKeyBytes::from_str) + .collect::<Result<Vec<_>, _>>() + .map(Validators::Specific)?, + }, + (None, None) => Err(format!( + "Must supply either --{VALIDATORS_FLAG} or --{COUNT_FLAG}." + ))?, + (Some(_), Some(_)) => { + Err(format!( + "Cannot supply both --{VALIDATORS_FLAG} and --{COUNT_FLAG}." + ))? + } + }; + + Ok(Self { + src_vc_url: clap_utils::parse_required(matches, SRC_VC_URL_FLAG)?, + src_vc_token_path: clap_utils::parse_required(matches, SRC_VC_TOKEN_FLAG)?, + dest_vc_url: clap_utils::parse_required(matches, DEST_VC_URL_FLAG)?, + dest_vc_token_path: clap_utils::parse_required(matches, DEST_VC_TOKEN_FLAG)?, + validators, + builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS_FLAG)?, + fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, + gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), + }, + }) + } +} + +pub async fn cli_run<'a>( + matches: &'a ArgMatches<'a>, + dump_config: DumpConfig, +) -> Result<(), String> { + let config = MoveConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await + } +} + +async fn run(config: MoveConfig) -> Result<(), String> { + let MoveConfig { + src_vc_url, + src_vc_token_path, + dest_vc_url, + dest_vc_token_path, + validators, + builder_proposals, + fee_recipient, + gas_limit, + mut password_source, + } = config; + + // Moving validators between the same VC is unlikely to be useful and probably indicates a user + // error. + if src_vc_url == dest_vc_url { + return Err(format!( + "--{} and --{} must be different", + SRC_VC_URL_FLAG, DEST_VC_URL_FLAG + )); + } + + let (src_http_client, src_keystores) = + vc_http_client(src_vc_url.clone(), &src_vc_token_path).await?; + let (dest_http_client, _dest_keystores) = + vc_http_client(dest_vc_url.clone(), &dest_vc_token_path).await?; + + if src_keystores.is_empty() { + return Err(NO_VALIDATORS_MSG.to_string()); + } + + let pubkeys_to_move = match validators { + Validators::All => src_keystores.iter().map(|v| v.validating_pubkey).collect(), + Validators::Count(count) => { + let mut viable_pubkeys: Vec<_> = src_keystores + .iter() + .filter(|v| !v.readonly.unwrap_or(true)) + .map(|v| v.validating_pubkey) + .collect(); + viable_pubkeys.sort_unstable_by_key(PublicKeyBytes::serialize); + viable_pubkeys + .get(0..count) + .ok_or_else(|| { + format!( + "Cannot move {} keystores since source validator client only has {} \ + keystores which are able to be moved (not read-only).", + count, + viable_pubkeys.len() + ) + })?
+ .to_vec() + } + Validators::Specific(request_pubkeys) => { + let request_pubkeys_set: HashSet<_> = request_pubkeys.iter().collect(); + let src_pubkeys_set: HashSet<_> = + src_keystores.iter().map(|v| &v.validating_pubkey).collect(); + let difference = request_pubkeys_set + .difference(&src_pubkeys_set) + .collect::<Vec<_>>(); + if !difference.is_empty() { + for pk in &difference { + eprintln!("{:?} is not present on {:?}", pk, src_vc_url); + } + return Err(format!( + "{} validators not found on {:?}", + difference.len(), + src_vc_url + )); + } + request_pubkeys + } + }; + + let src_keystores_map: HashMap<_, _> = src_keystores + .iter() + .map(|k| (k.validating_pubkey, k)) + .collect(); + + let count = pubkeys_to_move.len(); + for (i, &pubkey_to_move) in pubkeys_to_move.iter().enumerate() { + // Skip read-only validators rather than exiting. This makes it a bit easier to use the + // "all" flag. + if src_keystores_map + .get(&pubkey_to_move) + .ok_or("Inconsistent src keystore map")? + .readonly + .unwrap_or(true) + { + eprintln!("Skipping read-only validator {:?}", pubkey_to_move); + continue; + } + + let request = DeleteKeystoresRequest { + pubkeys: vec![pubkey_to_move], + }; + let deleted = match src_http_client.delete_lighthouse_keystores(&request).await { + Ok(deleted) => deleted, + Err(e) => { + match src_http_client.get_keystores().await { + Ok(response) => { + if response + .data + .iter() + .any(|v| v.validating_pubkey == pubkey_to_move) + { + eprintln!( + "There was an error removing a validator, however the validator \ + is still present on the source validator client. The recommended \ + solution is to run this command again." + ); + } + } + Err(_) => { + eprintln!( + "There was an error removing a validator and it's unclear if \ + the validator was removed or not. Manual user intervention is \ + required." + ); + } + }; + + return Err(format!("Deleting {:?} failed with {:?}", pubkey_to_move, e)); + } + }; + + let ExportKeystoresResponse { + mut data, + slashing_protection, + } = deleted; + + if data.len() != 1 { + return Err(format!( + "Unexpected number of deleted validators from VC: {}", + data.len() + )); + } + + let exported_validator = data + .pop() + .ok_or("VC responded with zero deleted validators")?; + + let (voting_keystore, voting_keystore_password) = match exported_validator { + SingleExportKeystoresResponse { + status: + Status { + status: DeleteKeystoreStatus::Deleted, + message: _, + }, + validating_keystore, + validating_keystore_password, + } => match (validating_keystore, validating_keystore_password) { + (Some(keystore), Some(password)) => (keystore, password), + (Some(keystore), None) => { + eprintln!( + "Validator {:?} requires a password; please provide it to continue \ + moving validators. \ + The dest VC will store this password on its filesystem and the password \ + will not be required next time the dest VC starts. \ + If the provided password is incorrect the user will \ + be asked to provide another password. \ + Failing to provide the correct password now will \ + result in the keystore being deleted from the src VC \ + without being transferred to the dest VC. \ + It is strongly recommended to provide a password now rather than exiting.", + pubkey_to_move + ); + + // Read the password from the user, retrying if the password is incorrect.
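+ // (A sketch of the flow below: `read_password` is called repeatedly until the + // returned password successfully decrypts the keystore; with the `Testing` + // password source each call consumes the next candidate from its list.)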
+ loop { + match password_source.read_password(&pubkey_to_move) { + Ok(password) => { + if let Err(e) = keystore.decrypt_keypair(password.as_ref()) { + eprintln!("Failed to decrypt keystore: {:?}", e); + } else { + break (keystore, password); + } + } + Err(e) => { + eprintln!( + "Retrying after error: {:?}. If this error persists the user will need to \ + manually recover their keystore for validator {:?} from the mnemonic.", + e, pubkey_to_move + ); + } + } + + // Add a sleep here to prevent spamming the console. + sleep(Duration::from_secs(1)).await; + } + } + (None, password_opt) => { + eprintln!( + "Validator {:?} was not moved since the validator client did \ + not return a keystore. It is likely that the \ + validator has been deleted from the source validator client \ + without being moved to the destination validator client. \ + This validator will most likely need to be manually recovered \ + from a mnemonic or backup.", + pubkey_to_move + ); + return Err(format!( + "VC returned deleted but keystore not present (password provided: {})", + password_opt.is_some() + )); + } + }, + SingleExportKeystoresResponse { + status: Status { status, .. }, + .. + } if matches!( + status, + DeleteKeystoreStatus::NotFound | DeleteKeystoreStatus::NotActive + ) => + { + eprintln!( + "Validator {:?} was not moved since it was not found or not active. This scenario \ + is unexpected and might indicate that another process is also performing \ + an export from the source validator client. Exiting now for safety. \ + If there is definitely no other process exporting validators then it \ + may be safe to run this command again.", + pubkey_to_move + ); + return Err(format!( + "VC indicated that a previously known validator was {:?}", + status, + )); + } + SingleExportKeystoresResponse { + status: Status { status, message }, + .. + } => { + eprintln!( + "Validator {:?} was not moved because the source validator client \ + indicated there was an error disabling it. Manual intervention is \ + required to recover from this scenario.", + pubkey_to_move + ); + return Err(format!( + "VC returned status {:?} with message {:?}", + status, message + )); + } + }; + + let keystore_derivation_path = voting_keystore.0.path(); + + let validator_specification = ValidatorSpecification { + voting_keystore, + voting_keystore_password, + slashing_protection: Some(InterchangeJsonStr(slashing_protection)), + fee_recipient, + gas_limit, + builder_proposals, + // Allow the VC to choose a default "enabled" state. Since "enabled" is not part of + // the standard API, leaving this as `None` means we are not forced to use the + // non-standard API. + enabled: None, + }; + + // We might as well just ignore validators that already exist on the destination machine; + // there doesn't appear to be much harm in adding them again, and removing them from the + // source VC is an improvement. + let ignore_duplicates = true; + + loop { + match validator_specification + .clone() + .upload(&dest_http_client, ignore_duplicates) + .await + { + Ok(status) => { + match status.status { + ImportKeystoreStatus::Imported => { + eprintln!("Moved keystore {} of {}", i + 1, count); + break; + } + ImportKeystoreStatus::Duplicate => { + eprintln!("Moved duplicate keystore {} of {} to the VC", i + 1, count); + break; + } + ImportKeystoreStatus::Error => { + eprintln!( + "Upload of keystore {} of {} failed with message: {:?}.", + i + 1, + count, + status.message, + ); + // Retry uploading this validator.
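+ // (Each retry is preceded by an `UPLOAD_RETRY_WAIT` (5s) pause; see + // `sleep_with_retry_message` below.)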
+ sleep_with_retry_message( + &pubkey_to_move, + keystore_derivation_path.as_deref(), + ) + .await; + } + } + } + e @ Err(UploadError::InvalidPublicKey) => { + eprintln!("Validator {} has an invalid public key", i); + return Err(format!("{:?}", e)); + } + Err(UploadError::DuplicateValidator(_)) => { + return Err( + "Duplicate validator detected when duplicates are ignored".to_string() + ); + } + Err(UploadError::FailedToListKeys(e)) => { + eprintln!( + "Failed to list keystores. Some keys may have been moved whilst \ + others may not. Error was {:?}", + e + ); + // Retry uploading this validator. + sleep_with_retry_message(&pubkey_to_move, keystore_derivation_path.as_deref()) + .await; + } + Err(UploadError::KeyUploadFailed(e)) => { + eprintln!( + "Failed to upload keystore. Some keys may have been moved whilst \ + others may not. Error was {:?}", + e + ); + // Retry uploading this validator. + sleep_with_retry_message(&pubkey_to_move, keystore_derivation_path.as_deref()) + .await; + } + Err(UploadError::IncorrectStatusCount(count)) => { + eprintln!( + "Keystore was uploaded, however the validator client returned an invalid response." + ); + return Err(format!( + "Invalid status count in import response: {}", + count + )); + } + Err(UploadError::FeeRecipientUpdateFailed(e)) => { + eprintln!( + "Failed to set fee recipient for validator {}. This value may need \ + to be set manually. Continuing with other validators. Error was {:?}", + i, e + ); + // Continue onto the next validator. + break; + } + Err(UploadError::PatchValidatorFailed(e)) => { + eprintln!( + "Failed to set some values on validator {} (e.g., builder, enabled or gas limit). \ + These values may need to be set manually. Continuing with other validators. \ + Error was {:?}", + i, e + ); + // Continue onto the next validator. + break; + } + } + eprintln!( + "Retrying upload of keystore {} of {} to the destination VC", + i + 1, + count + ); + } + } + + eprintln!("Done."); + + Ok(()) +} + +async fn sleep_with_retry_message(pubkey: &PublicKeyBytes, path: Option<&str>) { + let path = path.unwrap_or(""); + eprintln!( + "Sleeping for {:?} before retrying. Exiting the application before it completes \ + may result in the loss of a validator keystore. The keystore would need to be \ + restored from a backup or mnemonic. The keystore which may be lost has a public \ + key of {:?} and a derivation path of {}", + UPLOAD_RETRY_WAIT, pubkey, path + ); + sleep(UPLOAD_RETRY_WAIT).await +} + +// The tests use crypto and are too slow in debug.
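+// They only compile and run when debug assertions are disabled; presumably something +// like `cargo test --release -p validator_manager` will run them.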
+#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use super::*; + use crate::import_validators::tests::TestBuilder as ImportTestBuilder; + use account_utils::validator_definitions::SigningDefinition; + use std::fs; + use tempfile::{tempdir, TempDir}; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + const SRC_VC_TOKEN_FILE_NAME: &str = "src_vc_token.json"; + const DEST_VC_TOKEN_FILE_NAME: &str = "dest_vc_token.json"; + + type MutatePasswordFn = Box<dyn Fn(&mut HashMap<PublicKeyBytes, Vec<String>>)>; + + struct TestBuilder { + src_import_builder: Option<ImportTestBuilder>, + dest_import_builder: Option<ImportTestBuilder>, + http_config: HttpConfig, + duplicates: usize, + dir: TempDir, + move_back_again: bool, + remove_passwords_from_src_vc: bool, + mutate_passwords: Option<MutatePasswordFn>, + passwords: HashMap<PublicKeyBytes, Vec<String>>, + use_password_files: bool, + reuse_password_files: Option<usize>, + } + + impl TestBuilder { + async fn new() -> Self { + let dir = tempdir().unwrap(); + Self { + src_import_builder: None, + dest_import_builder: None, + http_config: ApiTester::default_http_config(), + duplicates: 0, + dir, + move_back_again: false, + remove_passwords_from_src_vc: false, + mutate_passwords: None, + passwords: <_>::default(), + use_password_files: false, + reuse_password_files: None, + } + } + + fn move_back_again(mut self) -> Self { + self.move_back_again = true; + self + } + + fn use_password_files(mut self) -> Self { + self.use_password_files = true; + self.http_config.store_passwords_in_secrets_dir = true; + self + } + + fn reuse_password_files(mut self, index: usize) -> Self { + self.reuse_password_files = Some(index); + self + } + + async fn with_src_validators(mut self, count: u32, first_index: u32) -> Self { + let builder = ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + self.src_import_builder = Some(builder); + self + } + + async fn with_dest_validators(mut self, count: u32, first_index: u32) -> Self { + let builder = ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + self.dest_import_builder = Some(builder); + self + } + + fn register_duplicates(mut self, num_duplicates: usize) -> Self { + self.duplicates = num_duplicates; + self + } + + fn remove_passwords_from_src_vc(mut self) -> Self { + self.remove_passwords_from_src_vc = true; + self + } + + fn mutate_passwords<F: Fn(&mut HashMap<PublicKeyBytes, Vec<String>>) + 'static>( + mut self, + func: F, + ) -> Self { + self.mutate_passwords = Some(Box::new(func)); + self + } + + async fn move_validators<F>( + &self, + gen_validators_enum: F, + src_vc: &ApiTester, + dest_vc: &ApiTester, + ) -> Result<(), String> + where + F: Fn(&[PublicKeyBytes]) -> Validators, + { + let src_vc_token_path = self.dir.path().join(SRC_VC_TOKEN_FILE_NAME); + fs::write(&src_vc_token_path, &src_vc.api_token).unwrap(); + let (src_vc_client, src_vc_initial_keystores) = + vc_http_client(src_vc.url.clone(), &src_vc_token_path) + .await + .unwrap(); + + let src_vc_initial_pubkeys: Vec<_> = src_vc_initial_keystores + .iter() + .map(|k| k.validating_pubkey) + .collect(); + let validators = gen_validators_enum(&src_vc_initial_pubkeys); + + let dest_vc_token_path = self.dir.path().join(DEST_VC_TOKEN_FILE_NAME); + fs::write(&dest_vc_token_path, &dest_vc.api_token).unwrap(); + + let (dest_vc_client, dest_vc_initial_keystores) = + vc_http_client(dest_vc.url.clone(), &dest_vc_token_path) + .await + .unwrap(); + + let move_config = MoveConfig { + src_vc_url: src_vc.url.clone(), + src_vc_token_path, + dest_vc_url: dest_vc.url.clone(), +
dest_vc_token_path: dest_vc_token_path.clone(), + validators: validators.clone(), + builder_proposals: None, + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Testing(self.passwords.clone()), + }; + + let result = run(move_config).await; + + if result.is_ok() { + let src_vc_final_keystores = src_vc_client.get_keystores().await.unwrap().data; + let dest_vc_final_keystores = dest_vc_client.get_keystores().await.unwrap().data; + + src_vc.ensure_key_cache_consistency().await; + dest_vc.ensure_key_cache_consistency().await; + + match validators { + Validators::All => { + assert!( + src_vc_final_keystores.is_empty(), + "all keystores should be removed from source vc" + ); + assert_eq!( + dest_vc_final_keystores.len(), + dest_vc_initial_keystores.len() + src_vc_initial_keystores.len() + - self.duplicates, + "the correct count of keystores should have been moved to the dest" + ); + for initial_keystore in &src_vc_initial_keystores { + assert!( + dest_vc_final_keystores.contains(initial_keystore), + "the source keystore should be present at the dest" + ); + assert!( + !src_vc + .secrets_dir + .path() + .join(format!("{:?}", initial_keystore.validating_pubkey)) + .exists(), + "the source password file should be deleted" + ) + } + } + Validators::Count(count) => { + assert_eq!( + src_vc_final_keystores.len(), + src_vc_initial_keystores.len() - count, + "keystores should be removed from source vc" + ); + assert_eq!( + dest_vc_final_keystores.len(), + dest_vc_initial_keystores.len() + count - self.duplicates, + "the correct count of keystores should have been moved to the dest" + ); + let moved_keystores: Vec<_> = { + let initial_set: HashSet<_> = src_vc_initial_keystores.iter().collect(); + let final_set: HashSet<_> = src_vc_final_keystores.iter().collect(); + initial_set.difference(&final_set).cloned().collect() + }; + assert_eq!(moved_keystores.len(), count); + for moved_keystore in &moved_keystores { + assert!( + dest_vc_final_keystores.contains(moved_keystore), + "the moved keystore should be present at the dest" + ); + assert!( + !src_vc + .secrets_dir + .path() + .join(format!("{:?}", moved_keystore.validating_pubkey)) + .exists(), + "the source password file should be deleted" + ) + } + } + Validators::Specific(pubkeys) => { + assert_eq!( + src_vc_final_keystores.len(), + src_vc_initial_keystores + .len() + .checked_sub(pubkeys.len()) + .unwrap(), + "the correct count of validators should have been removed from the src" + ); + assert_eq!( + dest_vc_final_keystores.len(), + dest_vc_initial_keystores.len() + pubkeys.len() - self.duplicates, + "the correct count of keystores should have been moved to the dest" + ); + for pubkey in pubkeys { + let initial_keystore = src_vc_initial_keystores + .iter() + .find(|k| k.validating_pubkey == pubkey) + .unwrap(); + assert!( + !src_vc_final_keystores.contains(initial_keystore), + "the keystore should not be present at the source" + ); + assert!( + dest_vc_final_keystores.contains(initial_keystore), + "the keystore should be present at the dest" + ); + if self.reuse_password_files.is_some() { + assert!( + src_vc + .secrets_dir + .path() + .join(format!("{:?}", pubkey)) + .exists(), + "the source password file was used by another validator and should not be deleted" + ) + } else { + assert!( + !src_vc + .secrets_dir + .path() + .join(format!("{:?}", pubkey)) + .exists(), + "the source password file should be deleted" + ) + } + } + } + } + + // If enabled, check that all VCs still have the password files for their validators. 
+ if self.use_password_files { + src_vc_final_keystores + .iter() + .map(|keystore| (&src_vc, keystore)) + .chain( + dest_vc_final_keystores + .iter() + .map(|keystore| (&dest_vc, keystore)), + ) + .for_each(|(vc, keystore)| { + assert!( + vc.secrets_dir + .path() + .join(format!("{:?}", keystore.validating_pubkey)) + .exists(), + "the password file should exist" + ) + }); + } + } + + result + } + + async fn run_test<F>(mut self, gen_validators_enum: F) -> TestResult + where + F: Fn(&[PublicKeyBytes]) -> Validators + Copy, + { + let src_vc = if let Some(import_builder) = self.src_import_builder.take() { + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + import_test_result.vc + } else { + ApiTester::new_with_http_config(self.http_config.clone()).await + }; + + // If enabled, set all the validator definitions on the src_vc to + // use the same password path as the given `primary_index`. This + // helps test that we don't delete a password file if it's in use by + // another validator. + if let Some(primary_index) = self.reuse_password_files { + let mut initialized_validators = src_vc.initialized_validators.write(); + let definitions = initialized_validators.as_mut_slice_testing_only(); + // Find the path of the "primary" definition. + let primary_path = definitions + .get(primary_index) + .map(|def| match &def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. + } => path.clone(), + _ => panic!("primary index does not have password path"), + }) + .unwrap(); + // Set all definitions to use the same password path as the primary. + definitions.iter_mut().for_each(|def| { + match &mut def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. + } => *path = primary_path.clone(), + _ => (), + } + }) + } + + let dest_vc = if let Some(import_builder) = self.dest_import_builder.take() { + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + import_test_result.vc + } else { + ApiTester::new_with_http_config(self.http_config.clone()).await + }; + + if self.remove_passwords_from_src_vc { + let passwords = src_vc + .initialized_validators + .write() + .delete_passwords_from_validator_definitions() + .unwrap(); + + self.passwords = passwords + .into_iter() + .map(|(pubkey, password)| { + ( + PublicKeyBytes::from(&pubkey), + vec![password.as_str().to_string()], + ) + }) + .collect(); + + if let Some(func) = self.mutate_passwords.take() { + func(&mut self.passwords) + } + } + + let result = self + .move_validators(gen_validators_enum, &src_vc, &dest_vc) + .await; + + if self.move_back_again { + self.move_validators(gen_validators_enum, &dest_vc, &src_vc) + .await + .unwrap(); + } + + TestResult { result } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value.
+ struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + + fn assert_err(self) { + assert!(self.result.is_err()) + } + + fn assert_err_is(self, msg: String) { + assert_eq!(self.result, Err(msg)) + } + } + + #[tokio::test] + async fn no_validators() { + TestBuilder::new() + .await + .run_test(|_| Validators::All) + .await + .assert_err_is(NO_VALIDATORS_MSG.to_string()); + } + + #[tokio::test] + async fn one_validator_move_all() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validator_move_one() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .run_test(|pubkeys| Validators::Specific(pubkeys.to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validator_to_non_empty_dest() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .with_dest_validators(1, 10) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validators_move_all_where_one_is_a_duplicate() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .with_dest_validators(1, 1) + .await + .register_duplicates(1) + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validators_move_one_where_one_is_a_duplicate() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .with_dest_validators(2, 0) + .await + .register_duplicates(1) + .run_test(|pubkeys| Validators::Specific(pubkeys[0..1].to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_all() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_one() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|pubkeys| Validators::Specific(pubkeys[0..1].to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_two() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|pubkeys| Validators::Specific(pubkeys[0..2].to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_three() { + TestBuilder::new() + .await + .with_src_validators(3, 42) + .await + .run_test(|pubkeys| Validators::Specific(pubkeys.to_vec())) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_one_by_count() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|_| Validators::Count(1)) + .await + .assert_ok(); + } + + #[tokio::test] + async fn three_validators_move_two_by_count() { + TestBuilder::new() + .await + .with_src_validators(3, 0) + .await + .run_test(|_| Validators::Count(2)) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validators_move_two_by_count() { + TestBuilder::new() + .await + .with_src_validators(1, 0) + .await + .run_test(|_| Validators::Count(2)) + .await + .assert_err(); + } + + #[tokio::test] + async fn two_validator_move_all_and_back_again() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .move_back_again() + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validator_move_all_passwords_removed() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + 
.remove_passwords_from_src_vc() + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + /// This test simulates a src VC that doesn't know the keystore passwords, + /// where the wrong password is provided before the correct password. + #[tokio::test] + async fn two_validator_move_all_passwords_removed_failed_password_attempt() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .remove_passwords_from_src_vc() + .mutate_passwords(|passwords| { + passwords.iter_mut().for_each(|(_, passwords)| { + passwords.insert(0, "wrong-password".to_string()); + passwords.push("wrong-password".to_string()); + }) + }) + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + /// This test simulates a src VC that doesn't know the keystore passwords, + /// where the correct password is never provided. + #[should_panic] + #[tokio::test] + async fn two_validator_move_all_passwords_removed_without_correct_password() { + TestBuilder::new() + .await + .with_src_validators(2, 0) + .await + .remove_passwords_from_src_vc() + .mutate_passwords(|passwords| { + passwords + .iter_mut() + .for_each(|(_, passwords)| *passwords = vec!["wrong-password".to_string()]) + }) + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn one_validator_move_all_with_password_files() { + TestBuilder::new() + .await + .use_password_files() + .with_src_validators(1, 0) + .await + .run_test(|_| Validators::All) + .await + .assert_ok(); + } + + #[tokio::test] + async fn two_validators_move_one_with_identical_password_files() { + TestBuilder::new() + .await + .use_password_files() + // The password file for validator 0 will be shared with other + // validators on the src vc. + .reuse_password_files(0) + .with_src_validators(2, 0) + .await + .run_test(|validators| Validators::Specific(validators[0..1].to_vec())) + .await + .assert_ok(); + } +} diff --git a/validator_manager/test_vectors/.gitignore b/validator_manager/test_vectors/.gitignore new file mode 100644 index 00000000000..3fec32c8427 --- /dev/null +++ b/validator_manager/test_vectors/.gitignore @@ -0,0 +1 @@ +tmp/ diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py new file mode 100644 index 00000000000..722414de733 --- /dev/null +++ b/validator_manager/test_vectors/generate.py @@ -0,0 +1,123 @@ +# This script uses the `ethereum/staking-deposit-cli` tool to generate +# deposit data files which are then used for testing by Lighthouse. +# +# To generate vectors, simply run this Python script: +# +# `python generate.py` +# +import os +import sys +import shutil +import subprocess +from subprocess import Popen, PIPE, STDOUT + + +NUM_VALIDATORS=3 +TEST_MNEMONIC = "test test test test test test test test test test test waste" +WALLET_NAME="test_wallet" + + +tmp_dir = os.path.join(".", "tmp") +mnemonic_path = os.path.join(tmp_dir, "mnemonic.txt") +sdc_dir = os.path.join(tmp_dir, "sdc") +sdc_git_dir = os.path.join(sdc_dir, "staking-deposit-cli") +vectors_dir = os.path.join(".", "vectors") + + +def setup(): + cleanup() + + if os.path.exists(vectors_dir): + shutil.rmtree(vectors_dir) + + os.mkdir(tmp_dir) + os.mkdir(sdc_dir) + os.mkdir(vectors_dir) + + setup_sdc() + with open(mnemonic_path, "x") as file: + file.write(TEST_MNEMONIC) + + +def cleanup(): + if os.path.exists(tmp_dir): + shutil.rmtree(tmp_dir) + + # Remove all the keystores since we don't use them in testing.
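+    # (This keeps only the deposit_data-*.json files, which are what the Rust + # tests in create_validators.rs compare against.)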
+ if os.path.exists(vectors_dir): + for root, dirs, files in os.walk(vectors_dir): + for file in files: + if file.startswith("keystore"): + os.remove(os.path.join(root, file)) + + +def setup_sdc(): + result = subprocess.run([ + "git", + "clone", + "--single-branch", + "https://github.com/ethereum/staking-deposit-cli.git", + str(sdc_git_dir) + ]) + assert(result.returncode == 0) + result = subprocess.run([ + "pip", + "install", + "-r", + "requirements.txt", + ], cwd=sdc_git_dir) + assert(result.returncode == 0) + result = subprocess.run([ + "python", + "setup.py", + "install", + ], cwd=sdc_git_dir) + assert(result.returncode == 0) + + +def sdc_generate(network, first_index, count, eth1_withdrawal_address=None): + if eth1_withdrawal_address is not None: + eth1_flags = ['--eth1_withdrawal_address', eth1_withdrawal_address] + uses_eth1 = True + else: + eth1_flags = [] + uses_eth1 = False + + test_name = "{}_first_{}_count_{}_eth1_{}".format(network, first_index, count, + str(uses_eth1).lower()) + output_dir = os.path.join(vectors_dir, test_name) + os.mkdir(output_dir) + + command = [ + '/bin/sh', + 'deposit.sh', + '--language', 'english', + '--non_interactive', + 'existing-mnemonic', + '--validator_start_index', str(first_index), + '--num_validators', str(count), + '--mnemonic', TEST_MNEMONIC, + '--chain', network, + '--keystore_password', 'MyPassword', + '--folder', os.path.abspath(output_dir), + ] + eth1_flags + + print("Running " + test_name) + process = Popen(command, cwd=sdc_git_dir, text=True, stdin = PIPE) + process.wait() + + +def test_network(network): + sdc_generate(network, first_index=0, count=1) + sdc_generate(network, first_index=0, count=2) + sdc_generate(network, first_index=12, count=1) + sdc_generate(network, first_index=99, count=2) + sdc_generate(network, first_index=1024, count=3) + sdc_generate(network, first_index=0, count=2, + eth1_withdrawal_address="0x0f51bb10119727a7e5ea3538074fb341f56b09ad") + + +setup() +test_network("mainnet") +test_network("prater") +cleanup() diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json new file mode 100644 index 00000000000..31c00c57f24 --- /dev/null +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json new file mode 100644 index 00000000000..2880b7724cf --- /dev/null +++ 
diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json
new file mode 100644
index 00000000000..31c00c57f24
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json
@@ -0,0 +1 @@
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json
new file mode 100644
index 00000000000..2880b7724cf
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json
@@ -0,0 +1 @@
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json
new file mode 100644
index 00000000000..da92a1d0d94
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json
@@ -0,0 +1 @@
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json
new file mode 100644
index 00000000000..9cc01dc0df7
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json
@@ -0,0 +1 @@
+[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json
new file mode 100644
index 00000000000..3a971d0959a
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json
@@ -0,0 +1 @@
+[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json
new file mode 100644
index 00000000000..2efa5c4ec8c
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json
@@ -0,0 +1 @@
+[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json b/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json
new file mode 100644
index 00000000000..c736d75b7e9
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json
@@ -0,0 +1 @@
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json
new file mode 100644
index 00000000000..e86500d14f2
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json
@@ -0,0 +1 @@
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "87b4b4e9c923aa9e1687219e9df0e838956ee6e15b7ab18142467430d00940dc7aa243c9996e85125dfe72d9dbdb00a30a36e16a2003ee0c86f29c9f5d74f12bfe5b7f62693dbf5187a093555ae8d6b48acd075788549c4b6a249b397af24cd0", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "ea80b639356a03f6f58e4acbe881fabefc9d8b93375a6aa7e530c77d7e45d3e4", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json
new file mode 100644
index 00000000000..c79ae5a4fc0
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json
@@ -0,0 +1 @@
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "ab32595d8201c2b4e8173aece9151fdc15f4d2ad36008462d0416598ddbf0f37ed0877f06d284a9669e73dbc0885bd2207fe64385e95a4488dc2bcb2c324d5c20da3248a6244463583dfbba8db20805765421e59cb56b0bc3ee6d24a9218216d", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "b4df3a3a26dd5f6eb32999d8a7051a7d1a8573a16553d4b45ee706a0d59c1066", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "9655e195eda5517efe6f36bcebd45250c889a4177d7bf5fcd59598d2d03f37f038b5ee2ec079a30a8382ea42f351943f08a6f006bab9c2130db2742bd7315c8ad5aa1f03a0801c26d4c9efdef71c4c59c449c7f9b21fa62600ab8f5f1e2b938a", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7661474fba11bfb453274f62df022cab3c0b6f4a58af4400f6bce83c9cb5fcb8", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json b/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json
new file mode 100644
index 00000000000..136dc38554c
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json
@@ -0,0 +1 @@
+[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "b5dae79ce8f3d7326b46f93182981c5f3d64257a457f038caa78ec8e5cc25a9fdac52c7beb221ab2a3205404131366ad18e1e13801393b3d486819e8cca96128bf1244884a91d05dced092c74bc1e7259788f30dd3432df15f3d2f629645f345", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "94213d76aba9e6a434589d4939dd3764e0832df78f66d30db22a760c14ba1b89", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "816f38a321c4f84ad5187eda58f6d9c1fd1e81c860ed1722bdb76b920fdd430a1e814b9bb893837ae3b38ad738684fbf1795fa687f617c52121472b1ac8d2e34e5c1127186233a8833ffb54c509d9e52cb7242c6c6a65b5e496296b3caa90d89", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "7ad1d059d69794680a1deef5e72c33827f0c449a5f0917095821c0343572789d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "95d20c35484dea6b2a0bd7c2da2d2e810d7829e14c03657b2524adfc2111aa5ed95908ecb975ff75ff742c68ce8df417016c048959b0f807675430f6d981478e26d48e594e0830a0406da9817f8a1ecb94bd8be1f9281eeb5e952a82173c72bb", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "83abfb2a166f7af708526a9bdd2767c4be3cd231c9bc4e2f047a80df88a2860c", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json b/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json
new file mode 100644
index 00000000000..ccd2ece0699
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json
@@ -0,0 +1 @@
+[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "8f75836ceb390dd4fc8c16bc4be52ca09b9c5aa0ab5bc16dcfdb344787b29ddfd76d877b0a2330bc8e904b233397c6bd124845d1b868e4951cb6daacea023c986bdf0c6ac28d73f65681d941ea96623bc23acc7c84dcfc1304686240d9171cfc", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "3011f5cac32f13e86ecc061e89ed6675c27a46ab6ecb1ec6f6e5f133ae1d0287", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file
diff --git a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json b/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json
new file mode 100644
index 00000000000..2ab5908307b
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json
@@ -0,0 +1 @@
+[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "a7706e102bfb0b986a5c8050044f7e221919463149771a92c3ca46ff7d4564867db48eaf89b5237fed8db2cdb9c9c057099d0982bbdb3fbfcbe0ab7259ad3f31f7713692b78ee25e6251982e7081d049804632b70b8a24d8c3e59b624a0bd221", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "8a26fbee0c3a99fe090af1fce68afc525b4e7efa70df72abaa91f29148b2f672", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8b7aa5b0e97d15ec8c2281b919fde9e064f6ac064b163445ea99441ab063f9d10534bfde861b5606021ae46614ff075e0c2305ce5a6cbcc9f0bc8e7df1a177c4d969a5ed4ac062b0ea959bdac963fe206b73565a1a3937adcca736c6117c15f0", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "d38575167a94b516455c5b7e36d24310a612fa0f4580446c5f9d45e4e94f0642", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}]
\ No newline at end of file

From d401633100254d8f9503a34f301c75576dc68ef7 Mon Sep 17 00:00:00 2001
From: Jimmy Chen
Date: Tue, 8 Aug 2023 17:25:29 +1000
Subject: [PATCH 19/20] Add same error handling for blob signing when pubkey is missing

---
 validator_client/src/block_service.rs | 31 +++++++++++++++++++++------
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs
index ae08a22d50e..e59838991cb 100644
--- a/validator_client/src/block_service.rs
+++ b/validator_client/src/block_service.rs
@@ -522,16 +522,33 @@ impl BlockService {
         };
 
         let maybe_signed_blobs = match maybe_blob_sidecars {
-            Some(blob_sidecars) => Some(
-                // TODO(jimmy): add same error handling as block signing, i.e. handle ValidatorStoreError::UnknownPubkey
-                self_ref
+            Some(blob_sidecars) => {
+                match self_ref
                     .validator_store
                     .sign_blobs(*validator_pubkey_ref, blob_sidecars)
                     .await
-                    .map_err(|e| {
-                        BlockError::Recoverable(format!("Unable to sign blob: {:?}", e))
-                    })?,
-            ),
+                {
+                    Ok(signed_blobs) => Some(signed_blobs),
+                    Err(ValidatorStoreError::UnknownPubkey(pubkey)) => {
+                        // A pubkey can be missing when a validator was recently removed
+                        // via the API.
+                        warn!(
+                            log,
+                            "Missing pubkey for blobs";
+                            "info" => "a validator may have recently been removed from this VC",
+                            "pubkey" => ?pubkey,
+                            "slot" => ?slot
+                        );
+                        return Ok(());
+                    }
+                    Err(e) => {
+                        return Err(BlockError::Recoverable(format!(
+                            "Unable to sign blobs: {:?}",
+                            e
+                        )))
+                    }
+                }
+            }
             None => None,
         };
         let signing_time_ms =
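
The rewrite above mirrors the error handling already used for block signing: an unknown pubkey is treated as a benign race (the validator was most likely removed through the key-management API mid-proposal) and quietly aborts the proposal, while any other signing failure remains a recoverable error. A condensed sketch of that pattern with toy types (hypothetical names; in the real code the unknown-pubkey arm also logs the warning shown above and returns early from the whole proposal):

    // Toy-type sketch of the "unknown pubkey is not an error" pattern.
    #[derive(Debug)]
    enum ValidatorStoreError {
        UnknownPubkey(String),
        Other(String),
    }

    #[derive(Debug)]
    enum BlockError {
        Recoverable(String),
    }

    fn handle_sign_result<T>(
        res: Result<T, ValidatorStoreError>,
    ) -> Result<Option<T>, BlockError> {
        match res {
            Ok(signed) => Ok(Some(signed)),
            // The validator may simply be gone; skip rather than fail.
            Err(ValidatorStoreError::UnknownPubkey(_pubkey)) => Ok(None),
            Err(e) => Err(BlockError::Recoverable(format!(
                "Unable to sign blobs: {:?}",
                e
            ))),
        }
    }

    fn main() {
        let gone: Result<Option<()>, _> =
            handle_sign_result(Err(ValidatorStoreError::UnknownPubkey("0xab".into())));
        assert!(matches!(gone, Ok(None)));
    }
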
From 3ba90474375197281f58c06196fb9c45af02c320 Mon Sep 17 00:00:00 2001
From: Jimmy Chen
Date: Tue, 8 Aug 2023 17:33:25 +1000
Subject: [PATCH 20/20] Fix release tests

---
 beacon_node/http_api/tests/broadcast_validation_tests.rs  | 5 +++--
 beacon_node/http_api/tests/tests.rs                       | 6 +++++-
 beacon_node/network/src/network_beacon_processor/tests.rs | 2 +-
 common/eth2/src/lib.rs                                    | 2 +-
 common/eth2/src/types.rs                                  | 3 ++-
 5 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs
index 8609938e364..d6287038e49 100644
--- a/beacon_node/http_api/tests/broadcast_validation_tests.rs
+++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs
@@ -210,7 +210,8 @@ pub async fn gossip_full_pass_ssz() {
     let slot_b = slot_a + 1;
 
     let state_a = tester.harness.get_current_state();
-    let (block, _): (SignedBeaconBlock<E>, _) = tester.harness.make_block(state_a, slot_b).await;
+    let ((block, _), _): ((SignedBeaconBlock<E>, _), _) =
+        tester.harness.make_block(state_a, slot_b).await;
 
     let response: Result<(), eth2::Error> = tester
         .client
@@ -906,7 +907,7 @@ pub async fn blinded_gossip_full_pass_ssz() {
     let slot_b = slot_a + 1;
 
     let state_a = tester.harness.get_current_state();
-    let (block, _): (SignedBlindedBeaconBlock<E>, _) =
+    let ((block, _), _): ((SignedBlindedBeaconBlock<E>, _), _) =
         tester.harness.make_blinded_block(state_a, slot_b).await;
 
     let response: Result<(), eth2::Error> = tester
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs
index 89c76beaa2a..992eb92b196 100644
--- a/beacon_node/http_api/tests/tests.rs
+++ b/beacon_node/http_api/tests/tests.rs
@@ -1307,7 +1307,11 @@ impl ApiTester {
         .await
         .0;
 
-        assert!(self.client.post_beacon_blocks_ssz(&block).await.is_err());
+        assert!(self
+            .client
+            .post_beacon_blocks_ssz(&SignedBlockContents::from(block))
+            .await
+            .is_err());
 
         assert!(
             self.network_rx.network_recv.recv().await.is_some(),
diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs
index 1f8ff010636..ebe5bd12388 100644
--- a/beacon_node/network/src/network_beacon_processor/tests.rs
+++ b/beacon_node/network/src/network_beacon_processor/tests.rs
@@ -343,7 +343,7 @@ impl TestRig {
         self.network_beacon_processor
             .send_blobs_by_range_request(
                 PeerId::random(),
-                (ConnectionId::new(42), SubstreamId::new(24)),
+                (ConnectionId::new_unchecked(42), SubstreamId::new(24)),
                 BlobsByRangeRequest {
                     start_slot: 0,
                     count,
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs
index ec4c7d63f38..820ca23bdb9 100644
--- a/common/eth2/src/lib.rs
+++ b/common/eth2/src/lib.rs
@@ -707,7 +707,7 @@ impl BeaconNodeHttpClient {
     /// Returns `Ok(None)` on a 404 error.
     pub async fn post_beacon_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
         &self,
-        block: &SignedBeaconBlock<T, Payload>,
+        block: &SignedBlockContents<T, Payload>,
     ) -> Result<(), Error> {
         let mut path = self.eth_path(V1)?;
 
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs
index 7851b590db4..60ee3041037 100644
--- a/common/eth2/src/types.rs
+++ b/common/eth2/src/types.rs
@@ -1424,9 +1424,10 @@ pub type BlockContentsTuple<T, Payload> = (
 );
 
 /// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`].
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Encode, Serialize, Deserialize)]
 #[serde(untagged)]
 #[serde(bound = "T: EthSpec")]
+#[ssz(enum_behaviour = "transparent")]
 pub enum SignedBlockContents<T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
     BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars<T, Payload>),
     Block(SignedBeaconBlock<T, Payload>),
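
A note on the `#[ssz(enum_behaviour = "transparent")]` attribute added above: a transparent SSZ enum writes no union selector, so its encoding is byte-for-byte the encoding of whichever variant it holds. That is what lets `post_beacon_blocks_ssz` now take a `SignedBlockContents` while staying wire-compatible with callers that previously sent a bare `SignedBeaconBlock`. A minimal sketch with toy types (assuming the ethereum_ssz and ethereum_ssz_derive crates, imported here as `ssz` and `ssz_derive`; these are not the real Lighthouse types):

    use ssz::Encode as _;
    use ssz_derive::Encode;

    #[derive(Encode)]
    struct Block {
        slot: u64,
    }

    #[derive(Encode)]
    struct BlockAndBlobs {
        slot: u64,
        blob_count: u64,
    }

    // With transparent behaviour the enum adds no selector byte: its SSZ
    // bytes are exactly the bytes of the contained variant.
    #[derive(Encode)]
    #[ssz(enum_behaviour = "transparent")]
    enum Contents {
        BlockAndBlobs(BlockAndBlobs),
        Block(Block),
    }

    fn main() {
        let bare = Block { slot: 7 }.as_ssz_bytes();
        let wrapped = Contents::Block(Block { slot: 7 }).as_ssz_bytes();
        // The wrapper is invisible on the wire.
        assert_eq!(bare, wrapped);
    }
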