Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Delete legacy payload reconstruction #6213

Merged
merged 5 commits into from
Sep 6, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
147 changes: 1 addition & 146 deletions beacon_node/beacon_chain/src/beacon_block_streamer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -710,10 +710,8 @@ impl From<Error> for BeaconChainError {
mod tests {
use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches};
use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType};
use execution_layer::test_utils::{Block, DEFAULT_ENGINE_CAPABILITIES};
use execution_layer::EngineCapabilities;
use execution_layer::test_utils::Block;
use std::sync::LazyLock;
use std::time::Duration;
use tokio::sync::mpsc;
use types::{
ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MinimalEthSpec, Slot,
Expand Down Expand Up @@ -864,147 +862,4 @@ mod tests {
}
}
}

// Integration test: verifies that `BeaconBlockStreamer` still reconstructs
// blocks correctly across every fork from Altair through Electra when the
// execution engine advertises no support for the
// `engine_getPayloadBodiesBy{Hash,Range}V1` methods, forcing the streamer
// onto its fallback reconstruction path. Blocks produced by the streamer
// must match the same blocks fetched directly from the store via
// `get_block`.
#[tokio::test]
async fn check_fallback_altair_to_electra() {
jimmygchen marked this conversation as resolved.
Show resolved Hide resolved
// Fork schedule: ten epochs total, with each post-Altair fork activating
// two epochs after the previous one.
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 10;
let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let electra_fork_epoch = 8usize;
let num_blocks_produced = num_epochs * slots_per_epoch;

// Altair is active from genesis; later forks follow the schedule above.
let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64));

let harness = get_harness(VALIDATOR_COUNT, spec);

// modify execution engine so it doesn't support engine_payloadBodiesBy* methods
let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap();
mock_execution_layer
.server
.set_engine_capabilities(EngineCapabilities {
get_payload_bodies_by_hash_v1: false,
get_payload_bodies_by_range_v1: false,
..DEFAULT_ENGINE_CAPABILITIES
});
// refresh capabilities cache
// `Some(Duration::ZERO)` forces an immediate re-query of the engine's
// capabilities instead of serving a previously cached value.
harness
.chain
.execution_layer
.as_ref()
.unwrap()
.get_engine_capabilities(Some(Duration::ZERO))
.await
.unwrap();

// go to bellatrix fork
harness
.extend_slots(bellatrix_fork_epoch * slots_per_epoch)
.await;
// extend half an epoch
harness.extend_slots(slots_per_epoch / 2).await;
// trigger merge
harness
.execution_block_generator()
.move_to_terminal_block()
.expect("should move to terminal block");
// Bump the terminal PoW block's timestamp to the next slot's timestamp so
// the merge transition is accepted for the upcoming block.
let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot;
harness
.execution_block_generator()
.modify_last_block(|block| {
if let Block::PoW(terminal_block) = block {
terminal_block.timestamp = timestamp;
}
});
// finish out merge epoch
harness.extend_slots(slots_per_epoch / 2).await;
// finish rest of epochs
harness
.extend_slots((num_epochs - 1 - bellatrix_fork_epoch) * slots_per_epoch)
.await;

// Sanity-check chain progress before exercising the streamer: the head
// should sit at the final produced slot with normal justification and
// finalization lag (1 and 2 epochs respectively).
let head = harness.chain.head_snapshot();
let state = &head.beacon_state;

assert_eq!(
state.slot(),
Slot::new(num_blocks_produced as u64),
"head should be at the current slot"
);
assert_eq!(
state.current_epoch(),
num_blocks_produced as u64 / MinimalEthSpec::slots_per_epoch(),
"head should be at the expected epoch"
);
assert_eq!(
state.current_justified_checkpoint().epoch,
state.current_epoch() - 1,
"the head should be justified one behind the current epoch"
);
assert_eq!(
state.finalized_checkpoint().epoch,
state.current_epoch() - 2,
"the head should be finalized two behind the current epoch"
);

// Collect every block root from genesis onward, in slot order.
let block_roots: Vec<Hash256> = harness
.chain
.forwards_iter_block_roots(Slot::new(0))
.expect("should get iter")
.map(Result::unwrap)
.map(|(root, _)| root)
.collect();

let mut expected_blocks = vec![];
// get all blocks the old fashioned way
for root in &block_roots {
let block = harness
.chain
.get_block(root)
.await
.expect("should get block")
.expect("block should exist");
expected_blocks.push(block);
}

// Stream one epoch's worth of roots at a time and compare each streamed
// block against the directly-fetched reference block.
for epoch in 0..num_epochs {
let start = epoch * slots_per_epoch;
let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch];
epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]);
// `CheckCaches::No` bypasses the in-memory caches — presumably to force
// reconstruction from the store/engine path under test; TODO confirm.
let streamer = BeaconBlockStreamer::new(&harness.chain, CheckCaches::No)
.expect("should create streamer");
let (block_tx, mut block_rx) = mpsc::unbounded_channel();
streamer.stream(epoch_roots.clone(), block_tx).await;

// The streamer must yield results in the same order the roots were
// requested, pairing each root with its reconstructed block.
for (i, expected_root) in epoch_roots.into_iter().enumerate() {
let (found_root, found_block_result) =
block_rx.recv().await.expect("should get block");

assert_eq!(
found_root, expected_root,
"expected block root should match"
);
match found_block_result.as_ref() {
Ok(maybe_block) => {
let found_block = maybe_block.clone().expect("should have a block");
let expected_block = expected_blocks
.get(start + i)
.expect("should get expected block");
assert_eq!(
found_block.as_ref(),
expected_block,
"expected block should match found block"
);
}
Err(e) => panic!("Error retrieving block {}: {:?}", expected_root, e),
}
}
}
}
}
45 changes: 0 additions & 45 deletions beacon_node/beacon_chain/src/bellatrix_readiness.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
use crate::{BeaconChain, BeaconChainError as Error, BeaconChainTypes};
use execution_layer::BlockByNumberQuery;
use serde::{Deserialize, Serialize, Serializer};
use slog::debug;
use std::fmt;
use std::fmt::Write;
use types::*;
Expand Down Expand Up @@ -199,7 +198,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
else {
return Ok(GenesisExecutionPayloadStatus::Irrelevant);
};
let fork = self.spec.fork_name_at_epoch(Epoch::new(0));

let execution_layer = self
.execution_layer
Expand All @@ -222,49 +220,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
});
}

// Double-check the block by reconstructing it.
let execution_payload = execution_layer
.get_payload_by_hash_legacy(exec_block_hash, fork)
.await
.map_err(|e| Error::ExecutionLayerGetBlockByHashFailed(Box::new(e)))?
.ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;

// Verify payload integrity.
let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref());

let got_transactions_root = header_from_payload.transactions_root();
let expected_transactions_root = latest_execution_payload_header.transactions_root();
let got_withdrawals_root = header_from_payload.withdrawals_root().ok();
let expected_withdrawals_root = latest_execution_payload_header.withdrawals_root().ok();

if got_transactions_root != expected_transactions_root {
return Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch {
got: got_transactions_root,
expected: expected_transactions_root,
});
}

if let Some(expected) = expected_withdrawals_root {
if let Some(got) = got_withdrawals_root {
if got != expected {
return Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch {
got,
expected,
});
}
}
}

if header_from_payload.to_ref() != latest_execution_payload_header {
debug!(
self.log,
"Genesis execution payload reconstruction failure";
"consensus_node_header" => ?latest_execution_payload_header,
"execution_node_header" => ?header_from_payload
);
return Ok(GenesisExecutionPayloadStatus::OtherMismatch);
}

Ok(GenesisExecutionPayloadStatus::Correct(exec_block_hash))
}
}
Expand Down
Loading
Loading