|
| 1 | +use clap::ArgMatches; |
| 2 | +use clap_utils::{parse_optional, parse_required}; |
| 3 | +use environment::Environment; |
| 4 | +use eth2::{ |
| 5 | + types::{BlockId, ChainSpec, ForkName, PublishBlockRequest, SignedBlockContents}, |
| 6 | + BeaconNodeHttpClient, Error, SensitiveUrl, Timeouts, |
| 7 | +}; |
| 8 | +use eth2_network_config::Eth2NetworkConfig; |
| 9 | +use ssz::Encode; |
| 10 | +use std::fs; |
| 11 | +use std::fs::File; |
| 12 | +use std::io::{Read, Write}; |
| 13 | +use std::path::{Path, PathBuf}; |
| 14 | +use std::sync::Arc; |
| 15 | +use std::time::Duration; |
| 16 | +use types::EthSpec; |
| 17 | + |
// Generous per-request timeout: bulk block/blob downloads over HTTP can be slow.
const HTTP_TIMEOUT: Duration = Duration::from_secs(3600);
// Default on-disk block cache location, relative to the current working directory.
const DEFAULT_CACHE_DIR: &str = "./cache";
| 20 | + |
| 21 | +pub fn run<T: EthSpec>( |
| 22 | + env: Environment<T>, |
| 23 | + network_config: Eth2NetworkConfig, |
| 24 | + matches: &ArgMatches, |
| 25 | +) -> Result<(), String> { |
| 26 | + let executor = env.core_context().executor; |
| 27 | + executor |
| 28 | + .handle() |
| 29 | + .ok_or("shutdown in progress")? |
| 30 | + .block_on(async move { run_async::<T>(network_config, matches).await }) |
| 31 | +} |
| 32 | + |
| 33 | +pub async fn run_async<T: EthSpec>( |
| 34 | + network_config: Eth2NetworkConfig, |
| 35 | + matches: &ArgMatches, |
| 36 | +) -> Result<(), String> { |
| 37 | + let spec = &network_config.chain_spec::<T>()?; |
| 38 | + let source_url: SensitiveUrl = parse_required(matches, "source-url")?; |
| 39 | + let target_url: SensitiveUrl = parse_required(matches, "target-url")?; |
| 40 | + let start_block: BlockId = parse_required(matches, "start-block")?; |
| 41 | + let maybe_common_ancestor_block: Option<BlockId> = |
| 42 | + parse_optional(matches, "known–common-ancestor")?; |
| 43 | + let cache_dir_path: PathBuf = |
| 44 | + parse_optional(matches, "block-cache-dir")?.unwrap_or(DEFAULT_CACHE_DIR.into()); |
| 45 | + |
| 46 | + let source = BeaconNodeHttpClient::new(source_url, Timeouts::set_all(HTTP_TIMEOUT)); |
| 47 | + let target = BeaconNodeHttpClient::new(target_url, Timeouts::set_all(HTTP_TIMEOUT)); |
| 48 | + |
| 49 | + if !cache_dir_path.exists() { |
| 50 | + fs::create_dir_all(&cache_dir_path) |
| 51 | + .map_err(|e| format!("Unable to create block cache dir: {:?}", e))?; |
| 52 | + } |
| 53 | + |
| 54 | + // 1. Download blocks back from head, looking for common ancestor. |
| 55 | + let mut blocks = vec![]; |
| 56 | + let mut next_block_id = start_block; |
| 57 | + loop { |
| 58 | + println!("downloading {next_block_id:?}"); |
| 59 | + |
| 60 | + let publish_block_req = |
| 61 | + get_block_from_source::<T>(&source, next_block_id, spec, &cache_dir_path).await; |
| 62 | + let block = publish_block_req.signed_block(); |
| 63 | + |
| 64 | + next_block_id = BlockId::Root(block.parent_root()); |
| 65 | + blocks.push((block.slot(), publish_block_req)); |
| 66 | + |
| 67 | + if let Some(ref common_ancestor_block) = maybe_common_ancestor_block { |
| 68 | + if common_ancestor_block == &next_block_id { |
| 69 | + println!("reached known common ancestor: {next_block_id:?}"); |
| 70 | + break; |
| 71 | + } |
| 72 | + } |
| 73 | + |
| 74 | + let block_exists_in_target = target |
| 75 | + .get_beacon_blocks_ssz::<T>(next_block_id, spec) |
| 76 | + .await |
| 77 | + .unwrap() |
| 78 | + .is_some(); |
| 79 | + if block_exists_in_target { |
| 80 | + println!("common ancestor found: {next_block_id:?}"); |
| 81 | + break; |
| 82 | + } |
| 83 | + } |
| 84 | + |
| 85 | + // 2. Apply blocks to target. |
| 86 | + for (slot, block) in blocks.iter().rev() { |
| 87 | + println!("posting block at slot {slot}"); |
| 88 | + if let Err(e) = target.post_beacon_blocks(block).await { |
| 89 | + if let Error::ServerMessage(ref e) = e { |
| 90 | + if e.code == 202 { |
| 91 | + println!("duplicate block detected while posting block at slot {slot}"); |
| 92 | + continue; |
| 93 | + } |
| 94 | + } |
| 95 | + return Err(format!("error posting {slot}: {e:?}")); |
| 96 | + } else { |
| 97 | + println!("success"); |
| 98 | + } |
| 99 | + } |
| 100 | + |
| 101 | + println!("SYNCED!!!!"); |
| 102 | + |
| 103 | + Ok(()) |
| 104 | +} |
| 105 | + |
| 106 | +async fn get_block_from_source<T: EthSpec>( |
| 107 | + source: &BeaconNodeHttpClient, |
| 108 | + block_id: BlockId, |
| 109 | + spec: &ChainSpec, |
| 110 | + cache_dir_path: &Path, |
| 111 | +) -> PublishBlockRequest<T> { |
| 112 | + let mut cache_path = cache_dir_path.join(format!("block_{block_id}")); |
| 113 | + |
| 114 | + if cache_path.exists() { |
| 115 | + let mut f = File::open(&cache_path).unwrap(); |
| 116 | + let mut bytes = vec![]; |
| 117 | + f.read_to_end(&mut bytes).unwrap(); |
| 118 | + PublishBlockRequest::from_ssz_bytes(&bytes, ForkName::Deneb).unwrap() |
| 119 | + } else { |
| 120 | + let block_from_source = source |
| 121 | + .get_beacon_blocks_ssz::<T>(block_id, spec) |
| 122 | + .await |
| 123 | + .unwrap() |
| 124 | + .unwrap(); |
| 125 | + let blobs_from_source = source |
| 126 | + .get_blobs::<T>(block_id, None) |
| 127 | + .await |
| 128 | + .unwrap() |
| 129 | + .unwrap() |
| 130 | + .data; |
| 131 | + |
| 132 | + let (kzg_proofs, blobs): (Vec<_>, Vec<_>) = blobs_from_source |
| 133 | + .iter() |
| 134 | + .cloned() |
| 135 | + .map(|sidecar| (sidecar.kzg_proof, sidecar.blob.clone())) |
| 136 | + .unzip(); |
| 137 | + |
| 138 | + let block_root = block_from_source.canonical_root(); |
| 139 | + let block_contents = SignedBlockContents { |
| 140 | + signed_block: Arc::new(block_from_source), |
| 141 | + kzg_proofs: kzg_proofs.into(), |
| 142 | + blobs: blobs.into(), |
| 143 | + }; |
| 144 | + let publish_block_req = PublishBlockRequest::BlockContents(block_contents); |
| 145 | + |
| 146 | + cache_path = cache_dir_path.join(format!("block_{block_root:?}")); |
| 147 | + let mut f = File::create(&cache_path).unwrap(); |
| 148 | + f.write_all(&publish_block_req.as_ssz_bytes()).unwrap(); |
| 149 | + |
| 150 | + publish_block_req |
| 151 | + } |
| 152 | +} |
0 commit comments