remove generic E from unrequired types
jxs committed Dec 18, 2024
1 parent 0ecd6c3 commit b01c568
Showing 29 changed files with 218 additions and 255 deletions.
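The change follows one pattern throughout: types such as CandidateBeaconNode, BeaconNodeFallback, and ValidatorStore carried an EthSpec type parameter they never stored, so the parameter is dropped from the type and declared only on the methods that actually need it (or replaced by a plain runtime value). A minimal sketch of that pattern, with invented Spec/Node names standing in for EthSpec and the Lighthouse types in this diff:

use std::marker::PhantomData;

/// Stand-in for the `EthSpec` trait; purely illustrative.
pub trait Spec {
    const SLOTS_PER_EPOCH: u64;
}
pub struct Mainnet;
impl Spec for Mainnet {
    const SLOTS_PER_EPOCH: u64 = 32;
}

/// Before: `E` is only needed by one method, but the struct must still name
/// it, which forces a `PhantomData` field onto the type and the parameter
/// onto every caller's signature.
pub struct NodeBefore<E: Spec> {
    index: usize,
    _phantom: PhantomData<E>,
}

impl<E: Spec> NodeBefore<E> {
    pub fn slots_per_epoch(&self) -> u64 {
        E::SLOTS_PER_EPOCH
    }
}

/// After: the struct is parameter-free; the one method that needs
/// spec-level information declares `E` itself.
pub struct NodeAfter {
    index: usize,
}

impl NodeAfter {
    pub fn slots_per_epoch<E: Spec>(&self) -> u64 {
        E::SLOTS_PER_EPOCH
    }
}

fn main() {
    let before: NodeBefore<Mainnet> = NodeBefore { index: 0, _phantom: PhantomData };
    // Callers now pick the spec at the call site with a turbofish instead of
    // carrying it in the type.
    let after = NodeAfter { index: 0 };
    assert_eq!(before.slots_per_epoch(), after.slots_per_epoch::<Mainnet>());
    println!("node {} ok", after.index);
}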
27 changes: 6 additions & 21 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 0 additions & 2 deletions Cargo.toml
@@ -84,7 +84,6 @@ members = [

"validator_client",
"validator_client/beacon_node_fallback",
"validator_client/doppelganger_service",
"validator_client/graffiti_file",
"validator_client/http_api",
"validator_client/http_metrics",
@@ -215,7 +214,6 @@ clap_utils = { path = "common/clap_utils" }
compare_fields = { path = "common/compare_fields" }
deposit_contract = { path = "common/deposit_contract" }
directory = { path = "common/directory" }
-doppelganger_service = { path = "validator_client/doppelganger_service" }
validator_services = { path = "validator_client/validator_services" }
environment = { path = "lighthouse/environment" }
eth1 = { path = "beacon_node/eth1" }
30 changes: 20 additions & 10 deletions testing/web3signer_tests/src/lib.rs
@@ -307,7 +307,7 @@ mod tests {

/// A testing rig which holds a `ValidatorStore`.
struct ValidatorStoreRig {
-validator_store: Arc<ValidatorStore<TestingSlotClock, E>>,
+validator_store: Arc<ValidatorStore<TestingSlotClock>>,
_validator_dir: TempDir,
runtime: Arc<tokio::runtime::Runtime>,
_runtime_shutdown: async_channel::Sender<()>,
@@ -361,7 +361,7 @@ mod tests {
..Default::default()
};

-let validator_store = ValidatorStore::<_, E>::new(
+let validator_store = ValidatorStore::<_>::new(
initialized_validators,
slashing_protection,
Hash256::repeat_byte(42),
@@ -370,6 +370,7 @@
slot_clock,
&config,
executor,
+E::slots_per_epoch(),
log.clone(),
);

@@ -486,7 +487,7 @@ mod tests {
generate_sig: F,
) -> Self
where
-F: Fn(PublicKeyBytes, Arc<ValidatorStore<TestingSlotClock, E>>) -> R,
+F: Fn(PublicKeyBytes, Arc<ValidatorStore<TestingSlotClock>>) -> R,
R: Future<Output = S>,
// We use the `SignedObject` trait to white-list objects for comparison. This avoids
// accidentally comparing something meaningless like a `()`.
@@ -521,7 +522,7 @@ mod tests {
web3signer_should_sign: bool,
) -> Self
where
-F: Fn(PublicKeyBytes, Arc<ValidatorStore<TestingSlotClock, E>>) -> R,
+F: Fn(PublicKeyBytes, Arc<ValidatorStore<TestingSlotClock>>) -> R,
R: Future<Output = Result<(), ValidatorStoreError>>,
{
for validator_rig in &self.validator_rigs {
@@ -588,7 +589,7 @@ mod tests {
.await
.assert_signatures_match("randao_reveal", |pubkey, validator_store| async move {
validator_store
-.randao_reveal(pubkey, Epoch::new(0))
+.randao_reveal::<E>(pubkey, Epoch::new(0))
.await
.unwrap()
})
@@ -629,7 +630,7 @@ mod tests {
.await
.assert_signatures_match("selection_proof", |pubkey, validator_store| async move {
validator_store
-.produce_selection_proof(pubkey, Slot::new(0))
+.produce_selection_proof::<E>(pubkey, Slot::new(0))
.await
.unwrap()
})
@@ -639,7 +640,7 @@
|pubkey, validator_store| async move {
let val_reg_data = get_validator_registration(pubkey);
validator_store
-.sign_validator_registration_data(val_reg_data)
+.sign_validator_registration_data::<E>(val_reg_data)
.await
.unwrap()
},
@@ -679,7 +680,11 @@ mod tests {
"sync_selection_proof",
|pubkey, validator_store| async move {
validator_store
-.produce_sync_selection_proof(&pubkey, altair_fork_slot, SyncSubnetId::from(0))
+.produce_sync_selection_proof::<E>(
+    &pubkey,
+    altair_fork_slot,
+    SyncSubnetId::from(0),
+)
.await
.unwrap()
},
@@ -689,7 +694,12 @@
"sync_committee_signature",
|pubkey, validator_store| async move {
validator_store
-.produce_sync_committee_signature(altair_fork_slot, Hash256::zero(), 0, &pubkey)
+.produce_sync_committee_signature::<E>(
+    altair_fork_slot,
+    Hash256::zero(),
+    0,
+    &pubkey,
+)
.await
.unwrap()
},
@@ -722,7 +732,7 @@ mod tests {
|pubkey, validator_store| async move {
let val_reg_data = get_validator_registration(pubkey);
validator_store
-.sign_validator_registration_data(val_reg_data)
+.sign_validator_registration_data::<E>(val_reg_data)
.await
.unwrap()
},
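In the ValidatorStore changes above, the only spec-level datum the store needs at rest becomes a runtime value: the constructor now receives E::slots_per_epoch() as a plain u64, while the signing helpers (randao_reveal::<E>, produce_selection_proof::<E>, and so on) take E per call. A rough sketch of the runtime-value half of that trade-off, with invented names rather than the real ValidatorStore API:

/// Illustrative stand-in for `EthSpec`; not the real trait.
pub trait Spec {
    fn slots_per_epoch() -> u64;
}
pub struct Mainnet;
impl Spec for Mainnet {
    fn slots_per_epoch() -> u64 {
        32
    }
}

/// The store itself is no longer generic: the one spec-level value it keeps
/// is stored as a plain field, supplied by the caller at construction time.
pub struct Store {
    slots_per_epoch: u64,
}

impl Store {
    /// Mirrors the new `E::slots_per_epoch()` constructor argument above.
    pub fn new(slots_per_epoch: u64) -> Self {
        Self { slots_per_epoch }
    }

    pub fn first_slot_of_epoch(&self, epoch: u64) -> u64 {
        epoch * self.slots_per_epoch
    }
}

fn main() {
    // The caller resolves the spec once, when building the store.
    let store = Store::new(Mainnet::slots_per_epoch());
    assert_eq!(store.first_slot_of_epoch(2), 64);
}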
1 change: 0 additions & 1 deletion validator_client/Cargo.toml
@@ -17,7 +17,6 @@ beacon_node_fallback = { workspace = true }
clap = { workspace = true }
clap_utils = { workspace = true }
directory = { workspace = true }
-doppelganger_service = { workspace = true }
dirs = { workspace = true }
eth2 = { workspace = true }
environment = { workspace = true }
47 changes: 23 additions & 24 deletions validator_client/beacon_node_fallback/src/lib.rs
@@ -17,7 +17,6 @@ use std::cmp::Ordering;
use std::fmt;
use std::fmt::Debug;
use std::future::Future;
-use std::marker::PhantomData;
use std::sync::Arc;
use std::time::{Duration, Instant};
use strum::{EnumString, EnumVariantNames};
@@ -60,7 +59,7 @@ pub struct LatencyMeasurement {
/// See `SLOT_LOOKAHEAD` for information about when this should run.
pub fn start_fallback_updater_service<T: SlotClock + 'static, E: EthSpec>(
context: RuntimeContext<E>,
-beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
+beacon_nodes: Arc<BeaconNodeFallback<T>>,
) -> Result<(), &'static str> {
let executor = context.executor;
if beacon_nodes.slot_clock.is_none() {
@@ -69,7 +68,7 @@ pub fn start_fallback_updater_service<T: SlotClock + 'static, E: EthSpec>(

let future = async move {
loop {
-beacon_nodes.update_all_candidates().await;
+beacon_nodes.update_all_candidates::<E>().await;

let sleep_time = beacon_nodes
.slot_clock
@@ -184,29 +183,27 @@ impl Serialize for CandidateInfo {
/// Represents a `BeaconNodeHttpClient` inside a `BeaconNodeFallback` that may or may not be used
/// for a query.
#[derive(Clone, Debug)]
-pub struct CandidateBeaconNode<E> {
+pub struct CandidateBeaconNode {
pub index: usize,
pub beacon_node: BeaconNodeHttpClient,
pub health: Arc<RwLock<Result<BeaconNodeHealth, CandidateError>>>,
-_phantom: PhantomData<E>,
}

-impl<E: EthSpec> PartialEq for CandidateBeaconNode<E> {
+impl PartialEq for CandidateBeaconNode {
fn eq(&self, other: &Self) -> bool {
self.index == other.index && self.beacon_node == other.beacon_node
}
}

-impl<E: EthSpec> Eq for CandidateBeaconNode<E> {}
+impl Eq for CandidateBeaconNode {}

-impl<E: EthSpec> CandidateBeaconNode<E> {
+impl CandidateBeaconNode {
/// Instantiate a new node.
pub fn new(beacon_node: BeaconNodeHttpClient, index: usize) -> Self {
Self {
index,
beacon_node,
health: Arc::new(RwLock::new(Err(CandidateError::Uninitialized))),
-_phantom: PhantomData,
}
}

@@ -215,14 +212,14 @@ impl<E: EthSpec> CandidateBeaconNode<E> {
*self.health.read().await
}

-pub async fn refresh_health<T: SlotClock>(
+pub async fn refresh_health<E: EthSpec, T: SlotClock>(
&self,
distance_tiers: &BeaconNodeSyncDistanceTiers,
slot_clock: Option<&T>,
spec: &ChainSpec,
log: &Logger,
) -> Result<(), CandidateError> {
-if let Err(e) = self.is_compatible(spec, log).await {
+if let Err(e) = self.is_compatible::<E>(spec, log).await {
*self.health.write().await = Err(e);
return Err(e);
}
@@ -286,7 +283,11 @@ impl<E: EthSpec> CandidateBeaconNode<E> {
}

/// Checks if the node has the correct specification.
-async fn is_compatible(&self, spec: &ChainSpec, log: &Logger) -> Result<(), CandidateError> {
+async fn is_compatible<E: EthSpec>(
+    &self,
+    spec: &ChainSpec,
+    log: &Logger,
+) -> Result<(), CandidateError> {
let config = self
.beacon_node
.get_config_spec::<ConfigSpec>()
@@ -371,18 +372,18 @@ impl<E: EthSpec> CandidateBeaconNode<E> {
/// behaviour, where the failure of one candidate results in the next candidate receiving an
/// identical query.
#[derive(Clone, Debug)]
-pub struct BeaconNodeFallback<T, E> {
-pub candidates: Arc<RwLock<Vec<CandidateBeaconNode<E>>>>,
+pub struct BeaconNodeFallback<T> {
+pub candidates: Arc<RwLock<Vec<CandidateBeaconNode>>>,
distance_tiers: BeaconNodeSyncDistanceTiers,
slot_clock: Option<T>,
broadcast_topics: Vec<ApiTopic>,
spec: Arc<ChainSpec>,
log: Logger,
}

-impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
+impl<T: SlotClock> BeaconNodeFallback<T> {
pub fn new(
-candidates: Vec<CandidateBeaconNode<E>>,
+candidates: Vec<CandidateBeaconNode>,
config: Config,
broadcast_topics: Vec<ApiTopic>,
spec: Arc<ChainSpec>,
@@ -466,15 +467,15 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
/// It is possible for a node to return an unsynced status while continuing to serve
/// low quality responses. To route around this it's best to poll all connected beacon nodes.
/// A previous implementation of this function polled only the unavailable BNs.
-pub async fn update_all_candidates(&self) {
+pub async fn update_all_candidates<E: EthSpec>(&self) {
// Clone the vec, so we release the read lock immediately.
// `candidate.health` is behind an Arc<RwLock>, so this would still allow us to mutate the values.
let candidates = self.candidates.read().await.clone();
let mut futures = Vec::with_capacity(candidates.len());
let mut nodes = Vec::with_capacity(candidates.len());

for candidate in candidates.iter() {
-futures.push(candidate.refresh_health(
+futures.push(candidate.refresh_health::<E, T>(
&self.distance_tiers,
self.slot_clock.as_ref(),
&self.spec,
@@ -693,7 +694,7 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
}

/// Helper functions to allow sorting candidate nodes by health.
-async fn sort_nodes_by_health<E: EthSpec>(nodes: &mut Vec<CandidateBeaconNode<E>>) {
+async fn sort_nodes_by_health(nodes: &mut Vec<CandidateBeaconNode>) {
// Fetch all health values.
let health_results: Vec<Result<BeaconNodeHealth, CandidateError>> =
future::join_all(nodes.iter().map(|node| node.health())).await;
@@ -711,7 +712,7 @@ async fn sort_nodes_by_health<E: EthSpec>(nodes: &mut Vec<CandidateBeaconNode<E>
});

// Reorder candidates based on the sorted indices.
-let sorted_nodes: Vec<CandidateBeaconNode<E>> = indices_with_health
+let sorted_nodes: Vec<CandidateBeaconNode> = indices_with_health
.into_iter()
.map(|(index, _)| nodes[index].clone())
.collect();
@@ -743,9 +744,7 @@ mod tests {
use eth2::Timeouts;
use std::str::FromStr;
use strum::VariantNames;
-use types::{MainnetEthSpec, Slot};
-
-type E = MainnetEthSpec;
+use types::Slot;

#[test]
fn api_topic_all() {
@@ -764,7 +763,7 @@
let optimistic_status = IsOptimistic::No;
let execution_status = ExecutionEngineHealth::Healthy;

-fn new_candidate(index: usize) -> CandidateBeaconNode<E> {
+fn new_candidate(index: usize) -> CandidateBeaconNode {
let beacon_node = BeaconNodeHttpClient::new(
SensitiveUrl::parse(&format!("http://example_{index}.com")).unwrap(),
Timeouts::set_all(Duration::from_secs(index as u64)),
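The doc comment on update_all_candidates above explains the polling design: every connected beacon node is refreshed, because an unsynced node may keep serving low-quality responses, and the candidate vec is cloned so the read lock is released while the health checks run concurrently. A simplified sketch of that loop, using invented Candidate/Health/Fallback types in place of the real ones and assuming the tokio and futures crates:

use std::sync::Arc;

use futures::future;
use tokio::sync::RwLock;

#[derive(Clone, Copy, Debug)]
enum Health {
    Unknown,
    Healthy,
}

/// Cut-down stand-in for `CandidateBeaconNode`: health sits behind its own
/// `Arc<RwLock>`, so a cloned candidate still updates the shared value.
#[derive(Clone)]
struct Candidate {
    index: usize,
    health: Arc<RwLock<Health>>,
}

impl Candidate {
    async fn refresh_health(&self) {
        // A real implementation would query the node's sync status here.
        *self.health.write().await = Health::Healthy;
    }
}

struct Fallback {
    candidates: Arc<RwLock<Vec<Candidate>>>,
}

impl Fallback {
    /// Poll every candidate, not just the ones previously marked unavailable.
    async fn update_all_candidates(&self) {
        // Clone the vec so the read lock is released immediately; each
        // candidate's health is shared through its inner Arc<RwLock>.
        let candidates = self.candidates.read().await.clone();
        future::join_all(candidates.iter().map(|c| c.refresh_health())).await;
    }
}

#[tokio::main]
async fn main() {
    let fallback = Fallback {
        candidates: Arc::new(RwLock::new(
            (0..3)
                .map(|index| Candidate {
                    index,
                    health: Arc::new(RwLock::new(Health::Unknown)),
                })
                .collect(),
        )),
    };
    fallback.update_all_candidates().await;
    for c in fallback.candidates.read().await.iter() {
        println!("candidate {} -> {:?}", c.index, *c.health.read().await);
    }
}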
20 changes: 0 additions & 20 deletions validator_client/doppelganger_service/Cargo.toml

This file was deleted.

1 change: 0 additions & 1 deletion validator_client/http_api/Cargo.toml
@@ -14,7 +14,6 @@ bls = { workspace = true }
beacon_node_fallback = { workspace = true }
deposit_contract = { workspace = true }
directory = { workspace = true }
-doppelganger_service = { workspace = true }
dirs = { workspace = true }
graffiti_file = { workspace = true }
eth2 = { workspace = true }
