diff --git a/Cargo.lock b/Cargo.lock index 343a27b41f9..2eb3d372c85 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -515,6 +515,11 @@ dependencies = [ name = "beefy-merkle-tree" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#666f39b8a22108f57732215de006518738034ba2" +dependencies = [ + "hex", + "log", + "tiny-keccak", +] [[package]] name = "beefy-primitives" @@ -732,6 +737,28 @@ dependencies = [ "thiserror", ] +[[package]] +name = "bp-beefy" +version = "0.1.0" +dependencies = [ + "beefy-merkle-tree", + "beefy-primitives", + "bp-runtime", + "frame-support", + "pallet-beefy-mmr", + "pallet-mmr", + "pallet-mmr-primitives", + "parity-scale-codec", + "scale-info", + "serde", + "sp-application-crypto", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "static_assertions", +] + [[package]] name = "bp-header-chain" version = "0.1.0" @@ -800,6 +827,7 @@ dependencies = [ name = "bp-millau" version = "0.1.0" dependencies = [ + "bp-beefy", "bp-messages", "bp-runtime", "fixed-hash", @@ -5754,6 +5782,34 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-bridge-beefy" +version = "0.1.0" +dependencies = [ + "beefy-merkle-tree", + "beefy-primitives", + "bp-beefy", + "bp-runtime", + "ckb-merkle-mountain-range", + "env_logger 0.8.4", + "frame-support", + "frame-system", + "hash256-std-hasher", + "hex", + "libsecp256k1", + "log", + "pallet-beefy-mmr", + "pallet-mmr", + "parity-scale-codec", + "rand 0.8.5", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-bridge-dispatch" version = "0.1.0" @@ -8512,6 +8568,7 @@ dependencies = [ name = "relay-millau-client" version = "0.1.0" dependencies = [ + "bp-beefy", "bp-messages", "bp-millau", "frame-support", @@ -8606,6 +8663,7 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", + "bp-beefy", "bp-header-chain", "bp-messages", "bp-runtime", @@ -8946,6 +9004,7 @@ dependencies = [ "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", + "pallet-bridge-beefy", "pallet-bridge-dispatch", "pallet-bridge-grandpa", "pallet-bridge-messages", @@ -11554,6 +11613,7 @@ dependencies = [ "anyhow", "async-std", "async-trait", + "bp-beefy", "bp-header-chain", "bp-kusama", "bp-message-dispatch", @@ -11622,6 +11682,7 @@ dependencies = [ "anyhow", "async-std", "async-trait", + "bp-beefy", "bp-header-chain", "bp-messages", "bp-millau", @@ -11639,6 +11700,7 @@ dependencies = [ "messages-relay", "num-traits", "pallet-balances", + "pallet-bridge-beefy", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-transaction-payment", @@ -11907,6 +11969,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.5.1" diff --git a/bin/rialto/runtime/Cargo.toml b/bin/rialto/runtime/Cargo.toml index 59b9a8e9b57..89e8db62e20 100644 --- a/bin/rialto/runtime/Cargo.toml +++ b/bin/rialto/runtime/Cargo.toml @@ -24,6 +24,7 @@ bp-millau = { path = "../../../primitives/chain-millau", default-features = fals bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } bridge-runtime-common = { path = "../../runtime-common", default-features = false } +pallet-bridge-beefy = { path = "../../../modules/beefy", default-features = false } 
pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } @@ -103,6 +104,7 @@ std = [ "pallet-balances/std", "pallet-beefy/std", "pallet-beefy-mmr/std", + "pallet-bridge-beefy/std", "pallet-bridge-dispatch/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", diff --git a/bin/rialto/runtime/src/lib.rs b/bin/rialto/runtime/src/lib.rs index 87cddf5e268..98426c2438e 100644 --- a/bin/rialto/runtime/src/lib.rs +++ b/bin/rialto/runtime/src/lib.rs @@ -68,6 +68,7 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; +pub use pallet_bridge_beefy::Call as BridgeBeefyCall; pub use pallet_bridge_grandpa::Call as BridgeGrandpaMillauCall; pub use pallet_bridge_messages::Call as MessagesCall; pub use pallet_sudo::Call as SudoCall; @@ -427,7 +428,7 @@ parameter_types! { pub const GetDeliveryConfirmationTransactionFee: Balance = bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; pub const RootAccountForPayments: Option = None; - pub const BridgedChainId: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID; + pub const BridgedChainId: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID; } /// Instance of the messages pallet used to relay messages to/from Millau chain. @@ -467,6 +468,14 @@ impl pallet_bridge_messages::Config for Runtime { type BridgedChainId = BridgedChainId; } +pub type MillauBeefyInstance = (); +impl pallet_bridge_beefy::Config for Runtime { + type MaxRequests = frame_support::traits::ConstU32<16>; + type ExpectedMmrLeafMajorVersion = frame_support::traits::ConstU8<0>; + type CommitmentsToKeep = frame_support::traits::ConstU32<8>; + type BridgedChain = bp_millau::Millau; +} + construct_runtime!( pub enum Runtime where Block = Block, @@ -494,11 +503,14 @@ construct_runtime!( Mmr: pallet_mmr::{Pallet, Storage}, MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, - // Millau bridge modules. + // Millau bridge modules (GRANDPA based). BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, + // Millau bridge modules (BEEFY based). + BridgeMillauBeefy: pallet_bridge_beefy::{Pallet, Call, Storage}, + // Parachain modules. 
ParachainsOrigin: polkadot_runtime_parachains::origin::{Pallet, Origin}, Configuration: polkadot_runtime_parachains::configuration::{Pallet, Call, Storage, Config}, diff --git a/modules/beefy/Cargo.toml b/modules/beefy/Cargo.toml new file mode 100644 index 00000000000..cf1a7e0ba84 --- /dev/null +++ b/modules/beefy/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "pallet-bridge-beefy" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +hash256-std-hasher = { version = "0.15.2", default-features = false } +log = { version = "0.4.14", default-features = false } +scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +serde = { version = "1.0", optional = true } + +# Bridge Dependencies + +bp-beefy = { path = "../../primitives/beefy", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } + +# Substrate Dependencies + +beefy-merkle-tree = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[dev-dependencies] +beefy-merkle-tree = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } +env_logger = "0.8" +hex = "0.4" +libsecp256k1 = "0.7" +mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.3.2" } +pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master" } +rand = "0.8" +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = ["std"] +std = [ + "beefy-merkle-tree/std", + "bp-beefy/std", + "bp-runtime/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "hash256-std-hasher/std", + "log/std", + "scale-info/std", + "serde", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/modules/beefy/src/commitment.rs b/modules/beefy/src/commitment.rs new file mode 100644 index 00000000000..abeba4208cc --- /dev/null +++ b/modules/beefy/src/commitment.rs @@ -0,0 +1,277 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! BEEFY commitment verification. + +use crate::{ + BridgedBeefyCommitmentHasher, BridgedBeefySignedCommitment, BridgedBeefyValidatorSet, + BridgedBlockNumber, Config, Error, +}; + +use bp_beefy::{BeefyMmrHash, BeefyRuntimeAppPublic}; +use codec::Encode; +use frame_support::ensure; +use sp_runtime::{traits::Hash, RuntimeDebug}; + +/// Artifacts of BEEFY commitment verification. +#[derive(RuntimeDebug, PartialEq)] +pub struct CommitmentVerificationArtifacts { + /// Finalized block number. + pub finalized_block_number: BlockNumber, + /// MMR root at the finalized block. + pub mmr_root: BeefyMmrHash, +} + +/// Verify that the commitment is valid and signed by the current validator set. +/// +/// Returns MMR root, extracted from commitment payload. +pub fn verify_beefy_signed_commitment, I: 'static>( + best_block_number: BridgedBlockNumber, + validators: &BridgedBeefyValidatorSet, + commitment: &BridgedBeefySignedCommitment, +) -> Result>, Error> { + // ensure that the commitment is signed by the best known BEEFY validators set + ensure!( + commitment.commitment.validator_set_id == validators.id(), + Error::::InvalidValidatorSetId + ); + ensure!( + commitment.signatures.len() == validators.len(), + Error::::InvalidSignaturesLength + ); + + // ensure that the commitment is for the better block that we know of + ensure!(commitment.commitment.block_number > best_block_number, Error::::OldCommitment); + + // ensure that the enough validators have signed on commitment + let commitment_hash = + BridgedBeefyCommitmentHasher::::hash(&commitment.commitment.encode()); + let correct_signatures_required = signatures_required(validators.len()); + let mut correct_signatures = 0; + for (validator_index, signature) in commitment.signatures.iter().enumerate() { + if let Some(signature) = signature { + let validator_public = &validators.validators()[validator_index]; + if validator_public.verify_prehashed(signature, &commitment_hash) { + correct_signatures += 1; + if correct_signatures >= correct_signatures_required { + break + } + } else { + log::debug!( + target: "runtime::bridge-beefy", + "Signed commitment contains incorrect signature of validator {} ({:?}): {:?}", + validator_index, + validator_public, + signature, + ); + } + } + } + ensure!( + correct_signatures >= correct_signatures_required, + Error::::NotEnoughCorrectSignatures + ); + + extract_mmr_root(commitment).map(|mmr_root| CommitmentVerificationArtifacts { + finalized_block_number: commitment.commitment.block_number, + mmr_root, + }) +} + +/// Number of correct signatures, required from given validators set to accept signed commitment. +/// +/// We're using 'conservative' approach here, where signatures of `2/3+1` validators are required.. +pub(crate) fn signatures_required(validators_len: usize) -> usize { + validators_len - validators_len.saturating_sub(1) / 3 +} + +/// Extract MMR root from commitment payload. 
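+// A worked example for `signatures_required` above: with `validators_len = 8` it
+// returns `8 - (8 - 1) / 3 = 8 - 2 = 6`, which matches the `2/3 + 1` supermajority
+// threshold (`8 * 2 / 3 + 1 = 6` with integer division).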
+fn extract_mmr_root, I: 'static>( + commitment: &BridgedBeefySignedCommitment, +) -> Result> { + commitment + .commitment + .payload + .get_decoded(&bp_beefy::MMR_ROOT_PAYLOAD_ID) + .ok_or(Error::MmrRootMissingFromCommitment) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{mock::*, mock_chain::*, *}; + use bp_beefy::{BeefyPayload, Commitment, MMR_ROOT_PAYLOAD_ID}; + use frame_support::assert_noop; + + #[test] + fn fails_to_import_commitment_if_signed_by_unexpected_validator_set() { + run_test_with_initialize(1, || { + // when `validator_set_id` is different from what's stored in the runtime + let mut commitment = ChainBuilder::new(1).append_finalized_header().to_header(); + commitment.commitment.as_mut().unwrap().commitment.validator_set_id += 1; + + assert_noop!( + import_commitment(commitment), + Error::::InvalidValidatorSetId, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_number_of_signatures_is_invalid() { + run_test_with_initialize(8, || { + // when additional signature is provided + let mut commitment = ChainBuilder::new(1).append_finalized_header().to_header(); + commitment.commitment.as_mut().unwrap().signatures.push(Default::default()); + + assert_noop!( + import_commitment(commitment.clone()), + Error::::InvalidSignaturesLength, + ); + + // when there's lack of signatures + commitment.commitment.as_mut().unwrap().signatures.pop(); + commitment.commitment.as_mut().unwrap().signatures.pop(); + + assert_noop!( + import_commitment(commitment), + Error::::InvalidSignaturesLength, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_it_does_not_improve_best_block() { + run_test_with_initialize(1, || { + BestBlockNumber::::put(10); + + // when commitment is for the same block + let mut commitment = ChainBuilder::new(1).append_finalized_header().to_header(); + commitment.commitment.as_mut().unwrap().commitment.block_number = 10; + + assert_noop!( + import_commitment(commitment.clone()), + Error::::OldCommitment, + ); + + // when commitment is for the ancestor of best block + commitment.commitment.as_mut().unwrap().commitment.block_number = 5; + + assert_noop!(import_commitment(commitment), Error::::OldCommitment,); + }); + } + + #[test] + fn fails_to_import_commitment_if_it_has_no_enough_valid_signatures() { + run_test_with_initialize(1, || { + // invalidate single signature + let mut commitment = ChainBuilder::new(1).append_finalized_header().to_header(); + *commitment + .commitment + .as_mut() + .unwrap() + .signatures + .iter_mut() + .find(|s| s.is_some()) + .unwrap() = Default::default(); + + assert_noop!( + import_commitment(commitment), + Error::::NotEnoughCorrectSignatures, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_there_is_no_mmr_root_in_the_payload() { + run_test_with_initialize(1, || { + // remove MMR root from the payload + let mut commitment = ChainBuilder::new(1).append_finalized_header().to_header(); + commitment.commitment = Some(sign_commitment( + Commitment { + payload: BeefyPayload::new(*b"xy", vec![]), + block_number: commitment.commitment.as_ref().unwrap().commitment.block_number, + validator_set_id: commitment + .commitment + .as_ref() + .unwrap() + .commitment + .validator_set_id, + }, + &validator_keys(0, 1), + )); + + assert_noop!( + import_commitment(commitment), + Error::::MmrRootMissingFromCommitment, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_mmr_root_decode_fails() { + run_test_with_initialize(1, || { + // MMR root is a 32-byte array and we have replaced it with single byte + let 
mut commitment = ChainBuilder::new(1).append_finalized_header().to_header(); + commitment.commitment = Some(sign_commitment( + Commitment { + payload: BeefyPayload::new(MMR_ROOT_PAYLOAD_ID, vec![42]), + block_number: commitment.commitment.as_ref().unwrap().commitment.block_number, + validator_set_id: commitment + .commitment + .as_ref() + .unwrap() + .commitment + .validator_set_id, + }, + &validator_keys(0, 1), + )); + + assert_noop!( + import_commitment(commitment), + Error::::MmrRootMissingFromCommitment, + ); + }); + } + + #[test] + fn verify_beefy_signed_commitment_works() { + let artifacts = verify_beefy_signed_commitment::( + 0, + &BridgedBeefyValidatorSet::::new(validator_ids(0, 8), 0).unwrap(), + &sign_commitment( + Commitment { + payload: BeefyPayload::new( + MMR_ROOT_PAYLOAD_ID, + BeefyMmrHash::from([42u8; 32]).encode(), + ), + block_number: 20, + validator_set_id: 0, + }, + &validator_keys(0, 8), + ), + ) + .unwrap(); + + assert_eq!( + artifacts, + CommitmentVerificationArtifacts { + finalized_block_number: 20, + mmr_root: BeefyMmrHash::from([42u8; 32]), + } + ); + } +} diff --git a/modules/beefy/src/leaf.rs b/modules/beefy/src/leaf.rs new file mode 100644 index 00000000000..c567e0e008b --- /dev/null +++ b/modules/beefy/src/leaf.rs @@ -0,0 +1,423 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! BEEFY MMR leaf verification verification. + +use crate::{ + BridgedBeefyMmrHasher, BridgedBeefyMmrLeaf, BridgedBeefyMmrLeafUnpacked, + BridgedBeefyValidatorIdToMerkleLeaf, BridgedBeefyValidatorSet, BridgedBlockHash, + BridgedBlockNumber, Config, Error, +}; + +use bp_beefy::{ + beefy_merkle_root, verify_mmr_leaf_proof, BeefyMmrHash, BeefyMmrProof, MmrDataOrHash, + MmrLeafVersion, +}; +use codec::Decode; +use frame_support::{ensure, traits::Get, RuntimeDebug, RuntimeDebugNoBound}; +use sp_runtime::traits::{Convert, One, Saturating}; +use sp_std::{marker::PhantomData, prelude::*}; + +/// Artifacts of MMR leaf proof verification. +#[derive(RuntimeDebug)] +pub struct BeefyMmrLeafVerificationArtifacts, I: 'static> { + /// Block number and hash of the finalized block parent. + pub parent_number_and_hash: (BridgedBlockNumber, BridgedBlockHash), + /// Next validator set, if handoff is happening. + pub next_validator_set: Option>, + /// Parachain heads merkle root at the imported block. + pub parachain_heads: BeefyMmrHash, +} + +/// Verify MMR proof of given leaf. +/// +/// Returns new BEEFY validator set if it is enacted. 
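+// Note on the validator set ids checked below: if the pallet's current set has id `N`,
+// a regular leaf announces the next authority set with id `N + 1`; a leaf announcing
+// id `N + 2` means the bridged chain has already handed over to set `N + 1`, so the
+// stored next set is enacted and the set provided with the leaf becomes the new next set.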
+pub fn verify_beefy_mmr_leaf, I: 'static>( + validators: &BridgedBeefyValidatorSet, + at_header: BridgedBlockNumber, + mmr_leaf: BridgedBeefyMmrLeafUnpacked, + mmr_proof: BeefyMmrProof, + mmr_root: BeefyMmrHash, +) -> Result, Error> +where + BridgedBeefyMmrHasher: 'static + Send + Sync, +{ + // decode raw MMR leaf + let raw_mmr_leaf = decode_raw_mmr_leaf::(mmr_leaf.leaf())?; + ensure!( + raw_mmr_leaf.parent_number_and_hash.0 == at_header.saturating_sub(One::one()), + Error::::InvalidParentNumberAndHash, + ); + + // TODO: is it the right condition? can id is increased by say +3? + let is_updating_validator_set = raw_mmr_leaf.beefy_next_authority_set.id == validators.id() + 2; + ensure!( + raw_mmr_leaf.beefy_next_authority_set.id == validators.id() + 1 || + is_updating_validator_set, + Error::::InvalidNextValidatorsSetId, + ); + // technically it is not an error, but we'd like to reduce tx size on real chains + ensure!( + !mmr_leaf.next_validators().is_some() || is_updating_validator_set, + Error::::RedundantNextValidatorsProvided, + ); + ensure!( + mmr_leaf.next_validators().is_some() || !is_updating_validator_set, + Error::::NextValidatorsAreNotProvided, + ); + + // verify mmr proof for the provided leaf + let mmr_proof_leaf_index = mmr_proof.leaf_index; + let mmr_proof_leaf_count = mmr_proof.leaf_count; + let mmr_proof_length = mmr_proof.items.len(); + let mmr_leaf_hash = + as bp_beefy::BeefyMmrHasher>::hash(mmr_leaf.leaf()); + verify_mmr_leaf_proof::< + BridgedBeefyMmrHasherAdapter>, + MmrDataOrHash>, BridgedBeefyMmrLeaf>, + >(mmr_root, MmrDataOrHash::Hash(mmr_leaf_hash), mmr_proof) + .map_err(|e| { + log::error!( + target: "runtime::bridge-beefy", + "MMR proof of leaf {:?} (root: {:?} leaf: {} total: {} len: {}) verification has failed with error: {:?}", + mmr_leaf_hash, + mmr_root, + mmr_proof_leaf_index, + mmr_proof_leaf_count, + mmr_proof_length, + e, + ); + + Error::::MmrProofVeriricationFailed + })?; + + // if new validators are provided, ensure that they match data from the leaf + let next_validator_set = if let Some(next_validators) = mmr_leaf.into_next_validators() { + ensure!(!next_validators.is_empty(), Error::::EmptyNextValidatorSet); + + let next_validator_addresses = next_validators + .iter() + .cloned() + .map(BridgedBeefyValidatorIdToMerkleLeaf::::convert) + .collect::>(); + let next_validator_addresses_root: BeefyMmrHash = + beefy_merkle_root::, _, _>(next_validator_addresses).into(); + ensure!( + next_validator_addresses_root == raw_mmr_leaf.beefy_next_authority_set.root, + Error::::InvalidNextValidatorSetRoot + ); + + Some( + BridgedBeefyValidatorSet::::new( + next_validators, + raw_mmr_leaf.beefy_next_authority_set.id, + ) + .unwrap(), + ) + } else { + None + }; + + Ok(BeefyMmrLeafVerificationArtifacts { + parent_number_and_hash: raw_mmr_leaf.parent_number_and_hash, + next_validator_set, + parachain_heads: raw_mmr_leaf.parachain_heads, + }) +} + +/// Decode MMR leaf of given major version. +fn decode_raw_mmr_leaf, I: 'static>( + encoded_leaf: &[u8], +) -> Result, Error> { + // decode version first, so that we know that the leaf format hasn't changed + let version = MmrLeafVersion::decode(&mut &encoded_leaf[..]).map_err(|e| { + // this shall never happen, because (as of now) leaf version is simple `u8` + // and we can't fail to decode `u8`. 
So this is here to support potential + // future changes + log::error!( + target: "runtime::bridge-beefy", + "MMR leaf version decode has failed with error: {:?}", + e, + ); + + Error::::FailedToDecodeMmrLeafVersion + })?; + ensure!( + version.split().0 == T::ExpectedMmrLeafMajorVersion::get(), + Error::::UnsupportedMmrLeafVersion + ); + + // decode the whole leaf + BridgedBeefyMmrLeaf::::decode(&mut &encoded_leaf[..]).map_err(|e| { + log::error!( + target: "runtime::bridge-beefy", + "MMR leaf decode has failed with error: {:?}", + e, + ); + + Error::::FailedToDecodeMmrLeaf + }) +} + +#[derive(RuntimeDebugNoBound)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +struct BridgedBeefyMmrHasherAdapter(PhantomData); + +/*#[cfg(feature = "std")] +impl sp_std::fmt::Debug for BridgedBeefyMmrHasherAdapter { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "BridgedBeefyMmrHasherAdapter") + } +}*/ + +impl Eq for BridgedBeefyMmrHasherAdapter {} + +impl PartialEq> for BridgedBeefyMmrHasherAdapter { + fn eq(&self, _: &Self) -> bool { + true + } +} + +impl Clone for BridgedBeefyMmrHasherAdapter { + fn clone(&self) -> Self { + BridgedBeefyMmrHasherAdapter(Default::default()) + } +} + +impl sp_core::Hasher for BridgedBeefyMmrHasherAdapter +where + H: beefy_merkle_tree::Hasher + Send + Sync, +{ + type Out = BeefyMmrHash; + type StdHasher = hash256_std_hasher::Hash256StdHasher; + const LENGTH: usize = 32; + + fn hash(s: &[u8]) -> Self::Out { + H::hash(s) + } +} + +impl sp_runtime::traits::Hash for BridgedBeefyMmrHasherAdapter +where + H: 'static + beefy_merkle_tree::Hasher + Send + Sync, +{ + type Output = BeefyMmrHash; + + fn ordered_trie_root( + _input: Vec>, + _state_version: sp_runtime::StateVersion, + ) -> Self::Output { + unreachable!("MMR never needs trie root functions; qed") + } + + fn trie_root( + _input: Vec<(Vec, Vec)>, + _state_version: sp_runtime::StateVersion, + ) -> Self::Output { + unreachable!("MMR never needs trie root functions; qed") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{mock::*, mock_chain::*, *}; + use codec::Encode; + use frame_support::assert_noop; + + #[test] + fn fails_to_import_commitment_if_leaf_version_is_unexpected() { + run_test_with_initialize(1, || { + // let's change leaf version to something lesser than expected + let commitment = ChainBuilder::new(1) + .custom_header() + .customize_leaf(|leaf| { + let mut raw_leaf = BridgedRawMmrLeaf::decode(&mut &leaf.leaf()[..]).unwrap(); + raw_leaf.version = MmrLeafVersion::new(EXPECTED_MMR_LEAF_MAJOR_VERSION - 1, 0); + leaf.set_leaf(raw_leaf.encode()) + }) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::UnsupportedMmrLeafVersion, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_leaf_decode_fails() { + run_test_with_initialize(1, || { + // let's leave leaf version, but replace other leaf data with something that can't be + // decoded + let commitment = ChainBuilder::new(1) + .custom_header() + .customize_leaf(|leaf| { + let mut raw_leaf = + MmrLeafVersion::new(EXPECTED_MMR_LEAF_MAJOR_VERSION, 0).encode(); + raw_leaf.push(42); + leaf.set_leaf(raw_leaf) + }) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::FailedToDecodeMmrLeaf, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_leaf_is_not_for_parent() { + run_test_with_initialize(1, || { + // let's change parent number in MMR leaf to something that isn't expected + let commitment = 
ChainBuilder::new(1) + .custom_header() + .customize_leaf(|leaf| { + let mut raw_leaf = BridgedRawMmrLeaf::decode(&mut &leaf.leaf()[..]).unwrap(); + raw_leaf.parent_number_and_hash.0 = raw_leaf.parent_number_and_hash.0 + 1; + leaf.set_leaf(raw_leaf.encode()) + }) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::InvalidParentNumberAndHash, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_signed_by_wrong_validator_set_id() { + run_test_with_initialize(1, || { + // let's change next validator set id, so that it won't match next + // validator set id and new valdiator set id + let commitment = ChainBuilder::new(1) + .custom_header() + .customize_leaf(|leaf| { + let mut raw_leaf = BridgedRawMmrLeaf::decode(&mut &leaf.leaf()[..]).unwrap(); + raw_leaf.beefy_next_authority_set.id += 10; + leaf.set_leaf(raw_leaf.encode()) + }) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::InvalidNextValidatorsSetId, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_leaf_provides_redundant_new_validator_set() { + run_test_with_initialize(1, || { + // let's change leaf so that signals handoff where handoff is not happening + let commitment = ChainBuilder::new(1) + .custom_header() + .customize_leaf(|leaf| leaf.set_next_validators(Some(Vec::new()))) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::RedundantNextValidatorsProvided, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_new_validator_set_is_not_provided() { + run_test_with_initialize(1, || { + // let's change leaf so that it should provide new validator set, but it does not + let commitment = ChainBuilder::new(1) + .custom_header() + .customize_leaf(|leaf| { + let mut raw_leaf = BridgedRawMmrLeaf::decode(&mut &leaf.leaf()[..]).unwrap(); + raw_leaf.beefy_next_authority_set.id += 1; + leaf.set_leaf(raw_leaf.encode()) + }) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::NextValidatorsAreNotProvided, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_mmr_proof_is_wrong() { + run_test_with_initialize(1, || { + // let's change proof so that its verification fails + let commitment = ChainBuilder::new(1) + .custom_header() + .customize_proof(|mut proof| { + proof.leaf_index += 1; + proof + }) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::MmrProofVeriricationFailed, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_new_validator_set_is_empty() { + run_test_with_initialize(1, || { + // let's change leaf so that it handoffs to empty validator set + let commitment = ChainBuilder::new(1) + .custom_handoff_header(1) + .customize_leaf(|leaf| leaf.set_next_validators(Some(Vec::new()))) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::EmptyNextValidatorSet, + ); + }); + } + + #[test] + fn fails_to_import_commitment_if_validators_merkle_root_mismatch() { + run_test_with_initialize(1, || { + // let's change leaf so that merkle root of new validators is wrong + let commitment = ChainBuilder::new(1) + .custom_handoff_header(1) + .customize_leaf(|leaf| { + let mut raw_leaf = BridgedRawMmrLeaf::decode(&mut &leaf.leaf()[..]).unwrap(); + raw_leaf.beefy_next_authority_set.root = Default::default(); + leaf.set_leaf(raw_leaf.encode()) + }) + .finalize() + .to_header(); + + assert_noop!( + import_commitment(commitment), + Error::::InvalidNextValidatorSetRoot, + ); + 
}); + } +} diff --git a/modules/beefy/src/lib.rs b/modules/beefy/src/lib.rs new file mode 100644 index 00000000000..9389265d0cc --- /dev/null +++ b/modules/beefy/src/lib.rs @@ -0,0 +1,856 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! BEEFY bridge pallet. +//! +//! This pallet is an on-chain BEEFY client for Substrate-based chains that are using following +//! pallets bundle: `pallet-mmr`, `pallet-beefy` and `pallet-beefy-mmr`. +//! +//! The pallet is able to verify MMR leaf proofs, so it has a **direct** access to the following +//! data of the bridged chain: +//! +//! - header hashes; +//! - changes of BEEFY authorities; +//! - extra data of MMR leafs (e.g. parachains heads when bridged with relay chain and properly +//! configured). +//! +//! Given the header hash (and parachain heads), other pallets are able to verify header-based +//! proofs. For example - storage proofs, transaction inclusion proofs, ...There are two options to +//! do that: +//! +//! - the cheap option only works when proof is header-proof is based on some recent header. Then +//! the submitter may relay on the fact that the pallet is storing hashes of the most recent +//! bridged headers. Then you may ensure that the provided header is valid by checking that the +//! `RecentHeaderHashes` map contains an entry for your header. +//! - the expensive option works for any header that is "covered" with MMR. The proof then must +//! include MMR proof for leaf, corresponding to the header and the header itself. + +#![cfg_attr(not(feature = "std"), no_std)] + +use bp_beefy::{BeefyMmrProof, ChainWithBeefy, InitializationData}; +use frame_system::RawOrigin; +use sp_runtime::traits::BadOrigin; +use sp_std::prelude::*; + +// Re-export in crate namespace for `construct_runtime!` +pub use pallet::*; + +/// Configured bridged chain. +pub type BridgedChain = >::BridgedChain; +/// Block number, used by configured bridged chain. +pub type BridgedBlockNumber = bp_runtime::BlockNumberOf>; +/// Block hash, used by configured bridged chain. +pub type BridgedBlockHash = bp_runtime::HashOf>; + +/// Pallet initialization data. +pub type InitializationDataOf = + InitializationData, bp_beefy::BeefyValidatorIdOf>>; +/// BEEFY commitment hasher, used by configured bridged chain. +pub type BridgedBeefyCommitmentHasher = bp_beefy::BeefyCommitmentHasher>; +/// BEEFY validator set, used by configured bridged chain. +pub type BridgedBeefyValidatorSet = bp_beefy::BeefyValidatorSetOf>; +/// BEEFY signed commitment, used by configured bridged chain. +pub type BridgedBeefySignedCommitment = bp_beefy::BeefySignedCommitmentOf>; +/// MMR hash algorithm, used by configured bridged chain. +pub type BridgedBeefyMmrHasher = bp_beefy::BeefyMmrHasherOf>; +/// Unpacked MMR leaf type, used by the pallet. 
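+// Illustration of the module-level note on header-based proofs (a sketch, not an API
+// provided by this pallet): for a recently finalized block, a downstream pallet may read
+// `ImportedCommitments::<T, I>::get(block_number)` and use the stored
+// `parent_number_and_hash` / `parachain_heads` directly, while for an older block it has
+// to verify an MMR leaf proof against the `mmr_root` of some later imported commitment.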
+pub type BridgedBeefyMmrLeafUnpacked = bp_beefy::BeefyMmrLeafUnpackedOf>; +/// MMR leaf type, used by configured bridged chain. +pub type BridgedBeefyMmrLeaf = bp_beefy::BeefyMmrLeafOf>; +/// A way to encode validator id to the BEEFY merkle tree leaf. +pub type BridgedBeefyValidatorIdToMerkleLeaf = + bp_beefy::BeefyValidatorIdToMerkleLeafOf>; +/// Imported commitment data, stored by the pallet. +pub type ImportedCommitment = + bp_beefy::ImportedCommitment, BridgedBlockHash>; + +mod commitment; +mod leaf; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod mock_chain; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The upper bound on the number of requests allowed by the pallet. + /// + /// A request refers to an action which writes a header to storage. + /// + /// Once this bound is reached the pallet will reject all commitments + /// until the request count has decreased. + #[pallet::constant] + type MaxRequests: Get; + + /// Expected MMR leaf version. + /// + /// The pallet will reject all leafs with mismatching major version. + #[pallet::constant] + type ExpectedMmrLeafMajorVersion: Get; + + /// Maximal number of imported commitments to keep in the storage. + /// + /// The setting is there to prevent growing the on-chain state indefinitely. Note + /// the setting does not relate to block numbers - we will simply keep as much items + /// in the storage, so it doesn't guarantee any fixed timeframe for imported commitments. + #[pallet::constant] + type CommitmentsToKeep: Get; + + /// The chain we are bridging to here. + type BridgedChain: ChainWithBeefy; + } + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { + >::mutate(|count| *count = count.saturating_sub(1)); + + (0_u64) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + } + + #[pallet::call] + impl, I: 'static> Pallet + where + BridgedBeefyMmrHasher: 'static + Send + Sync, + { + /// Initialize pallet with BEEFY validator set and best finalized block number. + #[pallet::weight((T::DbWeight::get().reads_writes(2, 4), DispatchClass::Operational))] + pub fn initialize( + origin: OriginFor, + init_data: InitializationDataOf, + ) -> DispatchResult { + ensure_owner_or_root::(origin)?; + + let init_allowed = !>::exists(); + ensure!(init_allowed, >::AlreadyInitialized); + + log::info!(target: "runtime::bridge-beefy", "Initializing bridge BEEFY pallet: {:?}", init_data); + Ok(initialize::(init_data)?.into()) + } + + /// Halt or resume all pallet operations. + /// + /// May only be called either by root, or by `PalletOwner`. + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operational(origin: OriginFor, operational: bool) -> DispatchResult { + ensure_owner_or_root::(origin)?; + >::put(!operational); + + if operational { + log::info!(target: "runtime::bridge-beefy", "Resuming pallet operations"); + } else { + log::warn!(target: "runtime::bridge-beefy", "Stopping pallet operations"); + } + + Ok(().into()) + } + + /// Change `PalletOwner`. + /// + /// May only be called either by root, or by `PalletOwner`. 
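+// Rate-limiting example for `MaxRequests` above: with `MaxRequests = 16` (the value used
+// by the Rialto runtime and the mock), at most 16 commitments may be submitted before
+// `TooManyRequests` is returned; since `on_initialize` decreases `RequestCount` by one,
+// one more submission becomes possible with every new block.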
+ #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_owner( + origin: OriginFor, + new_owner: Option, + ) -> DispatchResultWithPostInfo { + ensure_owner_or_root::(origin)?; + match new_owner { + Some(new_owner) => { + PalletOwner::::put(&new_owner); + log::info!(target: "runtime::bridge-beefy", "Setting pallet Owner to: {:?}", new_owner); + }, + None => { + PalletOwner::::kill(); + log::info!(target: "runtime::bridge-beefy", "Removed Owner of pallet."); + }, + } + + Ok(().into()) + } + + /// Submit commitment, generated by BEEFY validators. + /// + /// Apart from the generic payload, the commitment contains the finalized (by BEEFY) block + /// number, so they must be always be imported in order. Importing commitment gives us + /// knowledge of header hash that has been finalized by BEEFY validators. + #[pallet::weight(0)] // TODO: compute weights + pub fn submit_commitment( + origin: OriginFor, + // TODO: implement `TypeInfo` for `BridgedBeefySignedCommitment`, `BeefyMmrProof` + // and `BridgedBeefyMmrLeafUnpacked::` + encoded_commitment: Vec, + encoded_mmr_proof: Vec, + mmr_leaf: BridgedBeefyMmrLeafUnpacked, + ) -> DispatchResult { + ensure_operational::()?; + let _ = ensure_signed(origin)?; + + ensure!(Self::request_count() < T::MaxRequests::get(), >::TooManyRequests); + + // verify BEEFY commitment: once verification is completed, we know that BEEFY + // validators have finalized block with given number and given MMR root + let best_block_number = + BestBlockNumber::::get().ok_or(Error::::NotInitialized)?; + let validators = + CurrentValidatorSet::::get().ok_or(Error::::NotInitialized)?; + let commitment = + BridgedBeefySignedCommitment::::decode(&mut &encoded_commitment[..]) + .map_err(|e| { + log::error!( + target: "runtime::bridge-beefy", + "Signed commitment decode has failed with error: {:?}", + e, + ); + + Error::::FailedToDecodeArgument + })?; + + log::trace!( + target: "runtime::bridge-beefy", + "Importing commitment for block {:?}: {:?}", + commitment.commitment.block_number, + commitment, + ); + + let commitment_artifacts = commitment::verify_beefy_signed_commitment::( + best_block_number, + &validators, + &commitment, + )?; + + // MMR proof verification + let mmr_proof = BeefyMmrProof::decode(&mut &encoded_mmr_proof[..]).map_err(|e| { + log::error!( + target: "runtime::bridge-beefy", + "MMR proof decode has failed with error: {:?}", + e, + ); + + Error::::FailedToDecodeArgument + })?; + let mmr_leaf_artifacts = leaf::verify_beefy_mmr_leaf::( + &validators, + commitment_artifacts.finalized_block_number, + mmr_leaf, + mmr_proof, + commitment_artifacts.mmr_root, + )?; + + // update storage, essential for pallet operation + RequestCount::::mutate(|count| *count += 1); + BestBlockNumber::::put(commitment.commitment.block_number); + if let Some(new_next_validator_set) = mmr_leaf_artifacts.next_validator_set { + let next_validator_set = + NextValidatorSet::::get().ok_or(Error::::NotInitialized)?; + log::info!( + target: "runtime::bridge-beefy", + "Enacting new BEEFY validator set #{} with {} validators. 
Next validator set: #{} with {} validators.", + next_validator_set.id(), + next_validator_set.len(), + new_next_validator_set.id(), + new_next_validator_set.len(), + ); + + CurrentValidatorSet::::put(next_validator_set); + NextValidatorSet::::put(new_next_validator_set); + } + + // store imported commitment data + let index = ImportedCommitmentNumbersPointer::::get(); + let to_prune = ImportedCommitmentNumbers::::try_get(index); + ImportedCommitments::::insert( + commitment_artifacts.finalized_block_number, + ImportedCommitment:: { + parent_number_and_hash: mmr_leaf_artifacts.parent_number_and_hash, + mmr_root: commitment_artifacts.mmr_root, + parachain_heads: mmr_leaf_artifacts.parachain_heads, + }, + ); + ImportedCommitmentNumbers::::insert( + index, + commitment_artifacts.finalized_block_number, + ); + ImportedCommitmentNumbersPointer::::put( + (index + 1) % T::CommitmentsToKeep::get(), + ); + if let Ok(commitment_number) = to_prune { + log::debug!(target: "runtime::bridge-beefy", "Pruning old commitment: {:?}.", commitment_number); + ImportedCommitments::::remove(commitment_number); + } + + log::info!( + target: "runtime::bridge-beefy", + "Successfully imported commitment for block {:?}", + commitment.commitment.block_number, + ); + + Ok(()) + } + } + + /// The current number of requests which have written to storage. + /// + /// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until + /// the request capacity is increased. + /// + /// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure + /// that the pallet can always make progress. + #[pallet::storage] + #[pallet::getter(fn request_count)] + pub type RequestCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + + /// Best known block number of the bridged chain, finalized by BEEFY. + #[pallet::storage] + pub type BestBlockNumber, I: 'static = ()> = + StorageValue<_, BridgedBlockNumber>; + + /// All unpruned commitments that we have imported. + #[pallet::storage] + pub type ImportedCommitments, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, BridgedBlockNumber, ImportedCommitment>; + + /// A ring buffer of imported commitment numbers. Ordered by the insertion time + #[pallet::storage] + pub(super) type ImportedCommitmentNumbers, I: 'static = ()> = + StorageMap<_, Identity, u32, BridgedBlockNumber>; + + /// Current ring buffer position. + #[pallet::storage] + pub type ImportedCommitmentNumbersPointer, I: 'static = ()> = + StorageValue<_, u32, ValueQuery>; + + /// Current BEEFY validators set at the bridged chain. + #[pallet::storage] + pub type CurrentValidatorSet, I: 'static = ()> = + StorageValue<_, BridgedBeefyValidatorSet>; + + /// Next BEEFY validators set at the bridged chain. + #[pallet::storage] + pub type NextValidatorSet, I: 'static = ()> = + StorageValue<_, BridgedBeefyValidatorSet>; + + /// Optional pallet owner. + /// + /// Pallet owner has a right to halt all pallet operations and then resume it. If it is + /// `None`, then there are no direct ways to halt/resume pallet operations, but other + /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt + /// flag directly or call the `halt_operations`). + #[pallet::storage] + pub type PalletOwner, I: 'static = ()> = + StorageValue<_, T::AccountId, OptionQuery>; + + /// If true, all pallet transactions (except `set_operational`) are failed immediately. 
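+// Pruning example for the ring buffer above: with `CommitmentsToKeep = 8` (as configured
+// for the Rialto runtime), the pointer cycles through slots `0..8`; importing a ninth
+// commitment reuses slot `0` and the commitment number previously stored in that slot is
+// removed from `ImportedCommitments`.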
+ #[pallet::storage] + pub(super) type IsHalted, I: 'static = ()> = StorageValue<_, bool, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + /// Optional module owner account. + pub owner: Option, + /// Optional module initialization data. + pub init_data: Option>, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { owner: None, init_data: None } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + if let Some(ref owner) = self.owner { + >::put(owner); + } + + if let Some(init_data) = self.init_data.clone() { + initialize::(init_data) + .expect("invalid initialization data of BEEFY bridge pallet"); + } else { + // Since the bridge hasn't been initialized we shouldn't allow anyone to perform + // transactions. + >::put(true); + } + } + } + + #[pallet::error] + pub enum Error { + /// The pallet has already been initialized. + AlreadyInitialized, + /// Invalid initial current validator set. + InvalidCurrentValidatorSet, + /// Invalid initial next validator set. + InvalidNextValidatorSet, + /// All pallet operations are halted. + Halted, + /// There are too many requests for the current window to handle. + TooManyRequests, + /// Failed to decode method arguments (will be removed once `TypeInfo` will be + /// implemented for all arguments). + FailedToDecodeArgument, + /// The pallet has not been initialized yet. + NotInitialized, + /// The commitment being imported is older than the best commitment known to the pallet. + OldCommitment, + /// The commitment is signed by unknown validator set. + InvalidValidatorSetId, + /// The number of signatures in the commitment is invalid. + InvalidSignaturesLength, + /// There are not enough correct signatures in commitment to finalize block. + NotEnoughCorrectSignatures, + /// MMR root is missing from the commitment. + MmrRootMissingFromCommitment, + /// Failed to decode MMR leaf version. + FailedToDecodeMmrLeafVersion, + /// The leaf has unsupported version. + UnsupportedMmrLeafVersion, + /// Failed to decode MMR leaf version. + FailedToDecodeMmrLeaf, + /// Parent header number and hash field of the MMR leaf is invalid. + InvalidParentNumberAndHash, + /// MMR proof verification has failed. + MmrProofVeriricationFailed, + /// Next validator set id is invalid. + InvalidNextValidatorsSetId, + /// Next validators are provided when leaf is not signalling set change. + RedundantNextValidatorsProvided, + /// Next validators are not provided when leaf is signalling set change. + NextValidatorsAreNotProvided, + /// Next validators are not matching the merkle tree root. + InvalidNextValidatorSetRoot, + /// Next validator set is empty. + EmptyNextValidatorSet, + } + + /// Initialize pallet with given parameters. 
+ pub(super) fn initialize, I: 'static>( + init_data: InitializationDataOf, + ) -> Result<(), Error> { + let current_set = BridgedBeefyValidatorSet::::new( + init_data.current_validator_set.1, + init_data.current_validator_set.0, + ) + .ok_or(Error::::InvalidCurrentValidatorSet)?; + let next_set = BridgedBeefyValidatorSet::::new( + init_data.next_validator_set.1, + init_data.next_validator_set.0, + ) + .ok_or(Error::::InvalidNextValidatorSet)?; + + IsHalted::::put(init_data.is_halted); + BestBlockNumber::::put(init_data.best_beefy_block_number); + CurrentValidatorSet::::put(current_set); + NextValidatorSet::::put(next_set); + + Ok(()) + } + + /// Ensure that the origin is either root, or `PalletOwner`. + fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { + match origin.into() { + Ok(RawOrigin::Root) => Ok(()), + Ok(RawOrigin::Signed(ref signer)) + if Some(signer) == >::get().as_ref() => + Ok(()), + _ => Err(BadOrigin), + } + } + + /// Ensure that the pallet is in operational mode (not halted). + fn ensure_operational, I: 'static>() -> Result<(), Error> { + if >::get() { + Err(>::Halted) + } else { + Ok(()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::{assert_noop, assert_ok, traits::Get}; + use mock::*; + use mock_chain::*; + + fn next_block() { + use frame_support::traits::OnInitialize; + + let current_number = frame_system::Pallet::::block_number(); + frame_system::Pallet::::set_block_number(current_number + 1); + let _ = Pallet::::on_initialize(current_number); + } + + fn import_header_chain(headers: Vec) { + for header in headers { + if header.commitment.is_some() { + assert_ok!(import_commitment(header)); + } + } + } + + #[test] + fn fails_to_initialize_if_not_owner_and_root() { + run_test(|| { + PalletOwner::::put(1); + assert_noop!( + Pallet::::initialize( + Origin::signed(10), + InitializationData { + is_halted: false, + best_beefy_block_number: 0, + current_validator_set: (0, validator_ids(0, 1)), + next_validator_set: (1, validator_ids(0, 1)), + } + ), + BadOrigin, + ); + }) + } + + #[test] + fn root_is_able_to_initialize_pallet() { + run_test(|| { + assert_ok!(Pallet::::initialize( + Origin::root(), + InitializationData { + is_halted: false, + best_beefy_block_number: 0, + current_validator_set: (0, validator_ids(0, 1)), + next_validator_set: (1, validator_ids(0, 1)), + } + )); + + assert_eq!(BestBlockNumber::::get(), Some(0)); + }); + } + + #[test] + fn owner_is_able_to_initialize_pallet() { + run_test(|| { + PalletOwner::::put(10); + assert_ok!(Pallet::::initialize( + Origin::signed(10), + InitializationData { + is_halted: false, + best_beefy_block_number: 0, + current_validator_set: (0, validator_ids(0, 1)), + next_validator_set: (1, validator_ids(0, 1)), + } + )); + + assert_eq!(BestBlockNumber::::get(), Some(0)); + }); + } + + #[test] + fn fails_to_initialize_if_already_initialized() { + run_test_with_initialize(32, || { + assert_noop!( + Pallet::::initialize( + Origin::root(), + InitializationData { + is_halted: false, + best_beefy_block_number: 0, + current_validator_set: (0, validator_ids(0, 1)), + next_validator_set: (1, validator_ids(0, 1)), + } + ), + Error::::AlreadyInitialized, + ); + }); + } + + #[test] + fn fails_to_initialize_if_current_set_is_empty() { + run_test(|| { + assert_noop!( + Pallet::::initialize( + Origin::root(), + InitializationData { + is_halted: false, + best_beefy_block_number: 0, + current_validator_set: (0, Vec::new()), + next_validator_set: (1, validator_ids(0, 1)), + } + ), + 
Error::::InvalidCurrentValidatorSet, + ); + }); + } + + #[test] + fn fails_to_initialize_if_next_set_is_empty() { + run_test(|| { + assert_noop!( + Pallet::::initialize( + Origin::root(), + InitializationData { + is_halted: false, + best_beefy_block_number: 0, + current_validator_set: (0, validator_ids(0, 1)), + next_validator_set: (1, Vec::new()), + } + ), + Error::::InvalidNextValidatorSet, + ); + }); + } + + #[test] + fn fails_to_change_operation_mode_if_not_owner_and_root() { + run_test_with_initialize(1, || { + assert_noop!( + Pallet::::set_operational(Origin::signed(10), false), + BadOrigin, + ); + }); + } + + #[test] + fn root_is_able_to_change_operation_mode() { + run_test_with_initialize(1, || { + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + assert_eq!(IsHalted::::get(), true); + + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + assert_eq!(IsHalted::::get(), false); + }); + } + + #[test] + fn owner_is_able_to_change_operation_mode() { + run_test_with_initialize(1, || { + PalletOwner::::put(10); + + assert_ok!(Pallet::::set_operational(Origin::signed(10), false)); + assert_eq!(IsHalted::::get(), true); + + assert_ok!(Pallet::::set_operational(Origin::signed(10), true)); + assert_eq!(IsHalted::::get(), false); + }); + } + + #[test] + fn fails_to_set_owner_if_not_owner_and_root() { + run_test_with_initialize(1, || { + assert_noop!(Pallet::::set_owner(Origin::signed(10), Some(42)), BadOrigin,); + }); + } + + #[test] + fn root_is_able_to_set_owner() { + run_test_with_initialize(1, || { + assert_ok!(Pallet::::set_owner(Origin::root(), Some(42))); + assert_eq!(PalletOwner::::get(), Some(42)); + + assert_ok!(Pallet::::set_owner(Origin::root(), None)); + assert_eq!(PalletOwner::::get(), None); + }); + } + + #[test] + fn owner_is_able_to_set_owner() { + run_test_with_initialize(1, || { + PalletOwner::::put(10); + + assert_ok!(Pallet::::set_owner(Origin::signed(10), Some(42))); + assert_eq!(PalletOwner::::get(), Some(42)); + + assert_ok!(Pallet::::set_owner(Origin::signed(42), None)); + assert_eq!(PalletOwner::::get(), None); + }); + } + + #[test] + fn fails_to_import_commitment_if_halted() { + run_test_with_initialize(1, || { + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + assert_noop!( + import_commitment(ChainBuilder::new(1).append_finalized_header().to_header()), + Error::::Halted, + ); + }) + } + + #[test] + fn fails_to_import_commitment_if_too_many_requests() { + run_test_with_initialize(1, || { + let max_requests = <::MaxRequests as Get>::get() as u64; + let mut chain = ChainBuilder::new(1); + for _ in 0..max_requests + 2 { + chain = chain.append_finalized_header(); + } + + // import `max_request` headers + for i in 0..max_requests { + assert_ok!(import_commitment(chain.header(i + 1))); + } + + // try to import next header: it fails because we are no longer accepting commitments + assert_noop!( + import_commitment(chain.header(max_requests + 1)), + Error::::TooManyRequests, + ); + + // when next block is "started", we allow import of next header + next_block(); + assert_ok!(import_commitment(chain.header(max_requests + 1))); + + // but we can't import two headers until next block and so on + assert_noop!( + import_commitment(chain.header(max_requests + 2)), + Error::::TooManyRequests, + ); + }) + } + + #[test] + fn fails_to_import_commitment_if_not_initialized() { + run_test(|| { + assert_noop!( + import_commitment(ChainBuilder::new(1).append_finalized_header().to_header()), + Error::::NotInitialized, + ); + }) + } + + #[test] + fn 
submit_commitment_works_with_long_chain_with_handoffs() { + run_test_with_initialize(3, || { + let chain = ChainBuilder::new(3) + .append_finalized_header() // 1 + .append_default_headers(16) // 2..17 + .append_finalized_header() // 18 + .append_default_headers(16) // 19..34 + .append_handoff_header(9) // 35 + .append_default_headers(8) // 36..43 + .append_finalized_header() // 44 + .append_default_headers(8) // 45..52 + .append_handoff_header(17) // 53 + .append_default_headers(4) // 54..57 + .append_finalized_header() // 58 + .append_default_headers(4); // 59..63 + import_header_chain(chain.to_chain()); + + assert_eq!(BestBlockNumber::::get().unwrap(), 58); + assert_eq!(CurrentValidatorSet::::get().unwrap().id(), 2); + assert_eq!(CurrentValidatorSet::::get().unwrap().len(), 9); + assert_eq!(NextValidatorSet::::get().unwrap().id(), 3); + assert_eq!(NextValidatorSet::::get().unwrap().len(), 17); + + let imported_commitment = ImportedCommitments::::get(58).unwrap(); + assert_eq!( + imported_commitment, + bp_beefy::ImportedCommitment { + parent_number_and_hash: (57, chain.header(57).header.hash()), + mmr_root: chain.header(58).mmr_root, + parachain_heads: parachain_heads(&chain.header(58).header), + }, + ); + }) + } + + #[test] + fn commitment_pruning_works() { + run_test_with_initialize(3, || { + let commitments_to_keep = >::CommitmentsToKeep::get(); + let commitments_to_import: Vec = ChainBuilder::new(3) + .append_finalized_headers(commitments_to_keep as usize + 2) + .to_chain(); + + // import exactly `CommitmentsToKeep` commitments + for index in 0..commitments_to_keep { + next_block(); + import_commitment(commitments_to_import[index as usize].clone()) + .expect("must succeed"); + assert_eq!( + ImportedCommitmentNumbersPointer::::get(), + (index + 1) % commitments_to_keep + ); + } + + // ensure that all commitments are in the storage + assert_eq!( + BestBlockNumber::::get().unwrap(), + commitments_to_keep as mock::BridgedBlockNumber + ); + assert_eq!(ImportedCommitmentNumbersPointer::::get(), 0); + for index in 0..commitments_to_keep { + assert!(ImportedCommitments::::get( + index as mock::BridgedBlockNumber + 1 + ) + .is_some()); + assert_eq!( + ImportedCommitmentNumbers::::get(index), + Some(index + 1).map(Into::into) + ); + } + + // import next commitment + next_block(); + import_commitment(commitments_to_import[commitments_to_keep as usize].clone()) + .expect("must succeed"); + assert_eq!(ImportedCommitmentNumbersPointer::::get(), 1); + assert!(ImportedCommitments::::get( + commitments_to_keep as mock::BridgedBlockNumber + 1 + ) + .is_some()); + assert_eq!( + ImportedCommitmentNumbers::::get(0), + Some(commitments_to_keep + 1).map(Into::into) + ); + + // the side effect of the import is that the commitment#1 is pruned + assert!(ImportedCommitments::::get(1).is_none()); + + // import next commitment + next_block(); + import_commitment(commitments_to_import[commitments_to_keep as usize + 1].clone()) + .expect("must succeed"); + assert_eq!(ImportedCommitmentNumbersPointer::::get(), 2); + assert!(ImportedCommitments::::get( + commitments_to_keep as mock::BridgedBlockNumber + 2 + ) + .is_some()); + assert_eq!( + ImportedCommitmentNumbers::::get(1), + Some(commitments_to_keep + 2).map(Into::into) + ); + + // the side effect of the import is that the commitment#2 is pruned + assert!(ImportedCommitments::::get(1).is_none()); + assert!(ImportedCommitments::::get(2).is_none()); + }); + } +} diff --git a/modules/beefy/src/mock.rs b/modules/beefy/src/mock.rs new file mode 100644 index 
00000000000..91c706426ba --- /dev/null +++ b/modules/beefy/src/mock.rs @@ -0,0 +1,249 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate as beefy; +use crate::{ + BridgedBeefyCommitmentHasher, BridgedBeefyMmrHasher, BridgedBeefyMmrLeafUnpacked, + BridgedBeefySignedCommitment, BridgedBeefyValidatorIdToMerkleLeaf, +}; + +use bp_beefy::{BeefyMmrHash, ChainWithBeefy, Commitment, MmrDataOrHash}; +use bp_runtime::Chain; +use codec::Encode; +use frame_support::{construct_runtime, parameter_types, weights::Weight}; +use libsecp256k1::{sign, Message, PublicKey, SecretKey}; +use sp_core::sr25519::Signature; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, Hash, Header as HeaderT, IdentityLookup}, + Perbill, +}; +use std::collections::BTreeSet; + +pub use beefy_primitives::crypto::AuthorityId as BeefyId; + +pub type AccountId = u64; +pub type BridgedBlockNumber = u64; +pub type BridgedBlockHash = H256; +pub type BridgedHeader = Header; +pub type BridgedCommitment = BridgedBeefySignedCommitment; +pub type BridgedCommitmentHasher = BridgedBeefyCommitmentHasher; +pub type BridgedMmrHasher = BridgedBeefyMmrHasher; +pub type BridgedMmrLeaf = BridgedBeefyMmrLeafUnpacked; +pub type BridgedRawMmrLeaf = + beefy_primitives::mmr::MmrLeaf; +pub type BridgedMmrNode = MmrDataOrHash; +pub type BridgedValidatorIdToMerkleLeaf = BridgedBeefyValidatorIdToMerkleLeaf; + +type Block = frame_system::mocking::MockBlock; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + +construct_runtime! { + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Beefy: beefy::{Pallet}, + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = frame_support::traits::Everything; + type SystemWeightInfo = (); + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +pub const EXPECTED_MMR_LEAF_MAJOR_VERSION: u8 = 3; + +impl beefy::Config for TestRuntime { + type MaxRequests = frame_support::traits::ConstU32<16>; + type BridgedChain = TestBridgedChain; + type ExpectedMmrLeafMajorVersion = + frame_support::traits::ConstU8; + type CommitmentsToKeep = frame_support::traits::ConstU32<16>; +} + +#[derive(Debug)] +pub struct TestBridgedChain; + +impl Chain for TestBridgedChain { + type BlockNumber = BridgedBlockNumber; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = ::Header; + + type AccountId = AccountId; + type Balance = u64; + type Index = u64; + type Signature = Signature; + + fn max_extrinsic_size() -> u32 { + unreachable!() + } + fn max_extrinsic_weight() -> Weight { + unreachable!() + } +} + +impl ChainWithBeefy for TestBridgedChain { + type CommitmentHasher = sp_runtime::traits::Keccak256; + type MmrHasher = beefy_merkle_tree::Keccak256; + type ValidatorId = BeefyId; + type ValidatorIdToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; +} + +/// Run test within test runtime. +pub fn run_test(test: impl FnOnce() -> T) -> T { + sp_io::TestExternalities::new(Default::default()).execute_with(test) +} + +/// Initialize pallet and run test. +pub fn run_test_with_initialize(initial_validators_count: usize, test: impl FnOnce() -> T) -> T { + run_test(|| { + crate::Pallet::::initialize( + Origin::root(), + bp_beefy::InitializationData { + is_halted: false, + best_beefy_block_number: 0, + current_validator_set: (0, validator_ids(0, initial_validators_count)), + next_validator_set: (1, validator_ids(0, initial_validators_count)), + }, + ) + .expect("initialization data is correct"); + + test() + }) +} + +/// Import given commitment. +pub fn import_commitment( + header: crate::mock_chain::HeaderAndCommitment, +) -> sp_runtime::DispatchResult { + crate::Pallet::::submit_commitment( + Origin::signed(1), + header + .commitment + .expect("thou shall not call import_commitment on header without commitment") + .encode(), + header.leaf_proof.encode(), + header.leaf, + ) +} + +/// Return secret of validator with given index. +pub fn validator_key(index: usize) -> SecretKey { + let mut raw_secret = [1u8; 32]; + raw_secret[0..8].copy_from_slice(&(index as u64).encode()); + SecretKey::parse(&raw_secret).expect("only zero key is invalid; qed") +} + +/// Convert validator secret to public. +pub fn validator_key_to_public(key: SecretKey) -> PublicKey { + PublicKey::from_secret_key(&key) +} + +/// Return secrets of validators, starting at given index. 
+pub fn validator_keys(index: usize, size: usize) -> Vec { + (index..index + size).map(validator_key).collect() +} + +/// Return identifiers of validators, starting at given index. +pub fn validator_ids(index: usize, size: usize) -> Vec { + validator_keys(index, size) + .into_iter() + .map(|k| { + sp_core::ecdsa::Public::from_raw(validator_key_to_public(k).serialize_compressed()) + .into() + }) + .collect() +} + +/// Sign BEEFY commitment. +pub fn sign_commitment( + commitment: Commitment, + validator_keys: &[SecretKey], +) -> BridgedCommitment { + let total_validators = validator_keys.len(); + let signatures_required = crate::commitment::signatures_required(total_validators); + let random_validators = + rand::seq::index::sample(&mut rand::thread_rng(), total_validators, signatures_required) + .into_iter() + .collect::>(); + + let commitment_hash = + Message::parse(BridgedCommitmentHasher::hash(&commitment.encode()).as_fixed_bytes()); + let mut signatures = vec![None; total_validators]; + for validator in 0..total_validators { + if !random_validators.contains(&validator) { + continue + } + + let validator_key = &validator_keys[validator]; + let (signature, recovery_id) = sign(&commitment_hash, validator_key); + let mut raw_signature_with_recovery = [recovery_id.serialize(); 65]; + raw_signature_with_recovery[..64].copy_from_slice(&signature.serialize()); + log::trace!( + target: "runtime::bridge-beefy", + "Validator {} ({:?}) has signed commitment hash ({:?}): {:?}", + validator, + hex::encode(validator_key_to_public(validator_key.clone()).serialize_compressed()), + hex::encode(commitment_hash.serialize()), + hex::encode(signature.serialize()), + ); + signatures[validator] = + Some(sp_core::ecdsa::Signature::from_raw(raw_signature_with_recovery).into()); + } + + BridgedCommitment { commitment, signatures } +} + +/// Returns dummy parachain heads for given header. +pub fn parachain_heads(header: &BridgedHeader) -> BeefyMmrHash { + bp_beefy::beefy_merkle_root::(vec![ + header.number().encode(), + header.hash().encode(), + ]) +} diff --git a/modules/beefy/src/mock_chain.rs b/modules/beefy/src/mock_chain.rs new file mode 100644 index 00000000000..ad7498df831 --- /dev/null +++ b/modules/beefy/src/mock_chain.rs @@ -0,0 +1,330 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Utilities to build bridged chain and BEEFY+MMR structures. 
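For readers unfamiliar with the recoverable-ECDSA plumbing that the `sign_commitment` helper above depends on, here is a minimal standalone sketch (not part of the patch; the key and hash bytes are arbitrary placeholders) showing the same `libsecp256k1` calls and the 65-byte `r || s || recovery_id` layout:

use libsecp256k1::{recover, sign, Message, PublicKey, SecretKey};

fn sign_and_recover_sketch() {
	// arbitrary non-zero secret and an arbitrary 32-byte "commitment hash"
	let secret = SecretKey::parse(&[1u8; 32]).expect("non-zero key below curve order is valid");
	let commitment_hash = Message::parse(&[42u8; 32]);

	// BEEFY validators sign the pre-hashed commitment, not the raw SCALE bytes
	let (signature, recovery_id) = sign(&commitment_hash, &secret);

	// pack into the 65-byte form that is fed to `sp_core::ecdsa::Signature::from_raw` above
	let mut raw = [0u8; 65];
	raw[..64].copy_from_slice(&signature.serialize());
	raw[64] = recovery_id.serialize();

	// the recovery id is what lets a verifier recover the signer from hash + signature
	let recovered = recover(&commitment_hash, &signature, &recovery_id).expect("just signed");
	assert_eq!(recovered.serialize(), PublicKey::from_secret_key(&secret).serialize());
}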
+ +use crate::mock::{ + parachain_heads, sign_commitment, validator_key_to_public, validator_keys, BridgedBlockNumber, + BridgedCommitment, BridgedHeader, BridgedMmrHasher, BridgedMmrLeaf, BridgedMmrNode, + BridgedValidatorIdToMerkleLeaf, EXPECTED_MMR_LEAF_MAJOR_VERSION, +}; + +use beefy_primitives::mmr::{BeefyNextAuthoritySet, MmrLeafVersion}; +use bp_beefy::{ + BeefyMmrHash, BeefyMmrProof, BeefyPayload, Commitment, ValidatorSetId, MMR_ROOT_PAYLOAD_ID, +}; +use codec::Encode; +use libsecp256k1::SecretKey; +use pallet_mmr::NodeIndex; +use rand::Rng; +use sp_runtime::traits::{Convert, Header as HeaderT}; +use std::collections::HashMap; + +#[derive(Debug, Clone)] +pub struct HeaderAndCommitment { + pub header: BridgedHeader, + pub commitment: Option, + pub leaf: BridgedMmrLeaf, + pub leaf_proof: BeefyMmrProof, + pub mmr_root: BeefyMmrHash, +} + +pub struct ChainBuilder { + headers: Vec, + validator_set_id: ValidatorSetId, + validator_keys: Vec, + next_validator_keys: Vec, + mmr: mmr_lib::MMR, +} + +struct BridgedMmrStorage { + nodes: HashMap, +} + +impl mmr_lib::MMRStore for BridgedMmrStorage { + fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result> { + Ok(self.nodes.get(&pos).cloned()) + } + + fn append(&mut self, pos: NodeIndex, elems: Vec) -> mmr_lib::Result<()> { + for (i, elem) in elems.into_iter().enumerate() { + self.nodes.insert(pos + i as NodeIndex, elem); + } + Ok(()) + } +} + +impl ChainBuilder { + /// Creates new chain builder with given validator set size. + pub fn new(initial_validators_count: usize) -> Self { + ChainBuilder { + headers: Vec::new(), + validator_set_id: 0, + validator_keys: validator_keys(0, initial_validators_count), + // validators for session 0 and 1 are always the same + next_validator_keys: validator_keys(0, initial_validators_count), + mmr: mmr_lib::MMR::new(0, BridgedMmrStorage { nodes: HashMap::new() }), + } + } + + /// Get header with given number. + pub fn header(&self, number: BridgedBlockNumber) -> HeaderAndCommitment { + self.headers[number as usize - 1].clone() + } + + /// Returns single built header. + pub fn to_header(&self) -> HeaderAndCommitment { + assert_eq!(self.headers.len(), 1); + self.headers[0].clone() + } + + /// Returns built chain. + pub fn to_chain(&self) -> Vec { + self.headers.clone() + } + + /// Append custom regular header using `HeaderBuilder`. + pub fn custom_header(self) -> HeaderBuilder { + let next_validator_set_id = self.validator_set_id + 1; + let next_validator_keys = self.next_validator_keys.clone(); + HeaderBuilder::with_chain(self, false, next_validator_set_id, next_validator_keys) + } + + /// Append custom handoff header using `HeaderBuilder`. + pub fn custom_handoff_header(self, next_validators_len: usize) -> HeaderBuilder { + let new_validator_set_id = self.validator_set_id + 2; + let new_validator_keys = validator_keys( + rand::thread_rng().gen::() % (usize::MAX / 2), + next_validators_len, + ); + HeaderBuilder::with_chain(self, true, new_validator_set_id, new_validator_keys) + } + + /// Appends header, that has been finalized by BEEFY (so it has a linked signed commitment). + pub fn append_finalized_header(self) -> Self { + let next_validator_set_id = self.validator_set_id + 1; + let next_validator_keys = self.next_validator_keys.clone(); + HeaderBuilder::with_chain(self, false, next_validator_set_id, next_validator_keys) + .finalize() + } + + /// Append multiple finalized headers at once. 
+ pub fn append_finalized_headers(mut self, count: usize) -> Self { + for _ in 0..count { + self = self.append_finalized_header(); + } + self + } + + /// Appends header, that enacts new validator set. + /// + /// Such headers are explicitly finalized by BEEFY. + pub fn append_handoff_header(self, next_validators_len: usize) -> Self { + let new_validator_set_id = self.validator_set_id + 2; + let new_validator_keys = validator_keys( + rand::thread_rng().gen::() % (usize::MAX / 2), + next_validators_len, + ); + + HeaderBuilder::with_chain(self, true, new_validator_set_id, new_validator_keys.clone()) + .finalize() + } + + /// Append single default header without commitment. + pub fn append_default_header(self) -> Self { + let next_validator_set_id = self.validator_set_id + 1; + let next_validator_keys = self.next_validator_keys.clone(); + HeaderBuilder::with_chain(self, false, next_validator_set_id, next_validator_keys).build() + } + + /// Append several default header without commitment. + pub fn append_default_headers(mut self, count: usize) -> Self { + for _ in 0..count { + self = self.append_default_header(); + } + self + } +} + +/// Custom header builder. +pub struct HeaderBuilder { + chain: ChainBuilder, + header: BridgedHeader, + leaf: BridgedMmrLeaf, + leaf_proof: Option, + new_validator_keys: Option>, +} + +impl HeaderBuilder { + fn with_chain( + chain: ChainBuilder, + handoff: bool, + next_validator_set_id: ValidatorSetId, + next_validator_keys: Vec, + ) -> Self { + // we're starting with header#1, since header#0 is always finalized + let header_number = chain.headers.len() as BridgedBlockNumber + 1; + let header = BridgedHeader::new( + header_number, + Default::default(), + Default::default(), + chain.headers.last().map(|h| h.header.hash()).unwrap_or_default(), + Default::default(), + ); + + let next_validator_publics = next_validator_keys + .iter() + .cloned() + .map(|k| { + sp_core::ecdsa::Public::from_raw(validator_key_to_public(k).serialize_compressed()) + .into() + }) + .collect::>(); + let next_validator_addresses = next_validator_publics + .iter() + .cloned() + .map(BridgedValidatorIdToMerkleLeaf::convert) + .collect::>(); + let raw_leaf = beefy_primitives::mmr::MmrLeaf { + version: MmrLeafVersion::new(EXPECTED_MMR_LEAF_MAJOR_VERSION, 0), + parent_number_and_hash: (header.number().saturating_sub(1), *header.parent_hash()), + beefy_next_authority_set: BeefyNextAuthoritySet { + id: next_validator_set_id, + len: next_validator_publics.len() as _, + root: bp_beefy::beefy_merkle_root::( + next_validator_addresses, + ), + }, + parachain_heads: parachain_heads(&header), + }; + + HeaderBuilder { + chain, + header, + leaf: if !handoff { + BridgedMmrLeaf::Regular(raw_leaf.encode()) + } else { + BridgedMmrLeaf::Handoff(raw_leaf.encode(), next_validator_publics) + }, + leaf_proof: None, + new_validator_keys: if handoff { Some(next_validator_keys) } else { None }, + } + } + + /// Customize header MMR leaf. + pub fn customize_leaf(mut self, f: impl FnOnce(BridgedMmrLeaf) -> BridgedMmrLeaf) -> Self { + self.leaf = f(self.leaf); + self + } + + /// Customize generated proof of header MMR leaf. + /// + /// Can only be called once. 
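As a usage illustration of the builder methods above (`append_finalized_header`, `append_default_headers`, `append_handoff_header`, `to_chain`), the following hypothetical test drives a short chain; the counts are arbitrary and the assertions only restate behaviour already visible in the builder code:

#[test]
fn chain_builder_usage_sketch() {
	let chain = ChainBuilder::new(3) // 3 initial validators
		.append_finalized_header()    // header 1: carries a signed BEEFY commitment
		.append_default_headers(4)    // headers 2..=5: no commitments
		.append_handoff_header(9)     // header 6: signals handoff to a 9-validator set
		.append_finalized_header();   // header 7: finalized after the handoff

	let headers = chain.to_chain();
	assert_eq!(headers.len(), 7);
	assert!(headers[0].commitment.is_some()); // finalized header has a commitment
	assert!(headers[1].commitment.is_none()); // default header does not
	assert!(headers[5].commitment.is_some()); // handoff headers are always finalized
}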
+ pub fn customize_proof(mut self, f: impl FnOnce(BeefyMmrProof) -> BeefyMmrProof) -> Self { + assert!(self.leaf_proof.is_none()); + + let raw_leaf_hash = BridgedMmrHasher::hash(self.leaf.leaf()); + let node = BridgedMmrNode::Hash(raw_leaf_hash.into()); + log::trace!( + target: "runtime::bridge-beefy", + "Inserting MMR leaf with hash {} for header {}", + node.hash(), + self.header.number(), + ); + let leaf_position = self.chain.mmr.push(node).unwrap(); + + let proof = self.chain.mmr.gen_proof(vec![leaf_position]).unwrap(); + // genesis has no leaf => leaf index is header number minus 1 + let leaf_index = *self.header.number() - 1; + let leaf_count = *self.header.number(); + let proof_size = proof.proof_items().len(); + self.leaf_proof = Some(f(BeefyMmrProof { + leaf_index, + leaf_count, + items: proof.proof_items().iter().map(|i| i.hash().to_fixed_bytes()).collect(), + })); + log::trace!( + target: "runtime::bridge-beefy", + "Proof of leaf {}/{} (for header {}) has {} items. Root: {}", + leaf_index, + leaf_count, + self.header.number(), + proof_size, + self.chain.mmr.get_root().unwrap().hash(), + ); + + self + } + + /// Build header without commitment. + pub fn build(mut self) -> ChainBuilder { + if self.leaf_proof.is_none() { + self = self.customize_proof(|proof| proof); + } + + if let Some(new_validator_keys) = self.new_validator_keys { + self.chain.validator_set_id = self.chain.validator_set_id + 1; + self.chain.validator_keys = self.chain.next_validator_keys; + self.chain.next_validator_keys = new_validator_keys; + } + + self.chain.headers.push(HeaderAndCommitment { + header: self.header, + commitment: None, + leaf: self.leaf, + leaf_proof: self.leaf_proof.expect("guaranteed by the customize_proof call above; qed"), + mmr_root: self.chain.mmr.get_root().unwrap().hash().into(), + }); + + self.chain + } + + /// Build header with commitment. + pub fn finalize(self) -> ChainBuilder { + let current_validator_set_id = self.chain.validator_set_id; + let current_validator_set_keys = self.chain.validator_keys.clone(); + let mut chain = self.build(); + + let last_header = chain.headers.last_mut().expect("added by append_header; qed"); + last_header.commitment = Some(sign_commitment( + Commitment { + payload: BeefyPayload::new( + MMR_ROOT_PAYLOAD_ID, + chain.mmr.get_root().unwrap().hash().encode(), + ), + block_number: *last_header.header.number(), + validator_set_id: current_validator_set_id, + }, + ¤t_validator_set_keys, + )); + + chain + } +} + +/// Default Merging & Hashing behavior for MMR. +pub struct BridgedMmrHashMerge; + +impl mmr_lib::Merge for BridgedMmrHashMerge { + type Item = BridgedMmrNode; + + fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item { + let mut concat = left.hash().as_ref().to_vec(); + concat.extend_from_slice(right.hash().as_ref()); + + BridgedMmrNode::Hash(BridgedMmrHasher::hash(&concat).into()) + } +} diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml new file mode 100644 index 00000000000..8c928381315 --- /dev/null +++ b/primitives/beefy/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "bp-beefy" +description = "Primitives of pallet-bridge-beefy module." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "bit-vec"] } +scale-info = { version = "2.0.1", default-features = false, features = ["bit-vec", "derive"] } +serde = { version = "1.0", optional = true } +static_assertions = "1.1" + +# Bridge Dependencies + +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Dependencies + +beefy-merkle-tree = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, features = ["keccak"] } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "beefy-primitives/std", + "bp-runtime/std", + "codec/std", + "frame-support/std", + "pallet-mmr/std", + "pallet-mmr-primitives/std", + "scale-info/std", + "serde", + "sp-application-crypto/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std" +] diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs new file mode 100644 index 00000000000..6c26675dfe6 --- /dev/null +++ b/primitives/beefy/src/lib.rs @@ -0,0 +1,268 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Primitives that are used to interact with BEEFY bridge pallet. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +pub use beefy_merkle_tree::{ + merkle_root as beefy_merkle_root, Hasher as BeefyMmrHasher, Keccak256 as BeefyKeccak256, +}; +pub use beefy_primitives::{ + crypto::AuthorityId as EcdsaValidatorId, known_payload_ids::MMR_ROOT_ID as MMR_ROOT_PAYLOAD_ID, + mmr::MmrLeafVersion, Commitment, Payload as BeefyPayload, SignedCommitment, ValidatorSet, + ValidatorSetId, BEEFY_ENGINE_ID, +}; +pub use pallet_beefy_mmr::BeefyEcdsaToEthereum; +pub use pallet_mmr::verify_leaf_proof as verify_mmr_leaf_proof; +pub use pallet_mmr_primitives::{DataOrHash as MmrDataOrHash, Proof as MmrProof}; + +use bp_runtime::{BlockNumberOf, Chain, HashOf}; +use codec::{Decode, Encode}; +use frame_support::Parameter; +use scale_info::TypeInfo; +use sp_core::H256; +use sp_runtime::{ + app_crypto::RuntimeAppPublic, + traits::{Convert, MaybeSerializeDeserialize}, + RuntimeDebug, +}; +use sp_std::prelude::*; + +pub mod storage_keys; + +/// Substrate-based chain with BEEFY && MMR pallets deployed. +/// +/// Both BEEFY and MMR pallets and their clients may be configured to use different +/// primitives. Some of types can be configured in low-level pallets, but are constrained +/// when BEEFY+MMR bundle is used. +pub trait ChainWithBeefy: Chain { + /// Hash algorithm used to compute digest of the BEEFY commitment. + /// + /// Corresponds to the hashing algorithm, used `beefy_gadget::BeefyKeystore`. + type CommitmentHasher: sp_runtime::traits::Hash; + + /// Hash algorithm used to build MMR. + /// + /// Corresponds to the `Hashing` field` of the `pallet-mmr` configuration. In BEEFY+MMR + /// bundle, its output is hardcoded to be `H256` (see `beefy_merkle_tree::Hash` trait). + /// + /// The same algorithm is also used to compute merkle roots in BEEFY - e.g. `parachain_heads` + /// and validator addresses root in leaf data. + type MmrHasher: beefy_merkle_tree::Hasher + Send + Sync; + + /// A way to identify BEEFY validator and verify its signature. + /// + /// Corresponds to the `BeefyId` field of the `pallet-beefy` configuration. + type ValidatorId: BeefyRuntimeAppPublic<::Output> + + Parameter + + MaybeSerializeDeserialize + + Send + + Sync; + + /// A way to convert validator id to its raw representation in the BEEFY merkle tree. + /// + /// Corresponds to the `BeefyAuthorityToMerkleLeaf` field of the `pallet-beefy-mmr` + /// configuration. + type ValidatorIdToMerkleLeaf: Convert>; +} + +/// Extended version of `RuntimeAppPublic`, which is able to verify signature of pre-hashed +/// message. Regular `RuntimeAppPublic` is hashing message itself (using `blake2`), which +/// is not how things work in BEEFY. +pub trait BeefyRuntimeAppPublic: RuntimeAppPublic { + /// Verify a signature on a pre-hashed message. Return `true` if the signature is valid + /// and thus matches the given `public` key. 
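The `beefy_merkle_root` re-export above is what the mock's `parachain_heads` helper and the leaf builder use to fold a list of byte blobs into a single 32-byte root. A minimal call sketch (leaf values are arbitrary; in the pallet the hasher would come from `ChainWithBeefy::MmrHasher`):

use bp_beefy::{beefy_merkle_root, BeefyKeccak256, BeefyMmrHash};

fn merkle_root_sketch() -> BeefyMmrHash {
	// any iterator of items convertible to `&[u8]` works as the leaf set
	let leaves = vec![b"leaf-1".to_vec(), b"leaf-2".to_vec()];
	beefy_merkle_root::<BeefyKeccak256, _>(leaves)
}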
+ fn verify_prehashed(&self, sig: &Self::Signature, msg_hash: &CommitmentHash) -> bool; +} + +// this implementation allows to bridge with BEEFY chains, that are using default (eth-compatible) +// BEEFY configuration +impl BeefyRuntimeAppPublic for beefy_primitives::crypto::AuthorityId { + fn verify_prehashed(&self, sig: &Self::Signature, msg_hash: &H256) -> bool { + use sp_application_crypto::AppKey; + static_assertions::assert_type_eq_all!( + <::Signature as AppKey>::UntypedGeneric, + sp_core::ecdsa::Signature, + ); + static_assertions::assert_type_eq_all!( + ::UntypedGeneric, + sp_core::ecdsa::Public, + ); + + // why it is here: + // + // 1) we need to call `sp_io::crypto::ecdsa_verify_prehashed` to be sure that the host + // function is used to verify signature; + // 2) there's no explicit conversions from app-crypto sig+key types to matching underlying + // types; 3) `ecdsa_verify_prehashed` works with underlying ECDSA types; + // 4) hence this "convert". + const PROOF: &'static str = + "static assertion guarantees that both underlying types are equal; \ + conversion between same types can't fail; \ + qed"; + let ecdsa_signature = sp_core::ecdsa::Signature::try_from(sig.as_ref()).expect(PROOF); + let ecdsa_public = sp_core::ecdsa::Public::try_from(self.as_ref()).expect(PROOF); + sp_io::crypto::ecdsa_verify_prehashed( + &ecdsa_signature, + msg_hash.as_fixed_bytes(), + &ecdsa_public, + ) + } +} + +/// BEEFY validator id used by given Substrate chain. +pub type BeefyValidatorIdOf = ::ValidatorId; + +/// BEEFY validator signature used by given Substrate chain. +pub type BeefyValidatorSignatureOf = + <::ValidatorId as RuntimeAppPublic>::Signature; + +/// Signed BEEFY commitment used by given Substrate chain. +pub type BeefySignedCommitmentOf = + SignedCommitment, BeefyValidatorSignatureOf>; + +/// BEEFY validator set, containing both validator identifiers and the numeric set id. +pub type BeefyValidatorSetOf = ValidatorSet>; + +/// Hash algorithm, used to compute digest of the BEEFY commitment before validators are signing the +/// commitment. +pub type BeefyCommitmentHasher = ::CommitmentHasher; + +/// unpacked BEEFY MMR leaf contents. +/// +/// See `BeefyMmrLeafUnpacked` for details. +pub type BeefyMmrLeafUnpackedOf = BeefyMmrLeafUnpacked>; + +/// BEEFY version of MMR leaf proof. +/// +/// Even though original struct supports different hash types, we're constraining it with the +/// hash type, used by BEEFY. +pub type BeefyMmrProof = MmrProof; + +/// Hash algorithm used in MMR construction by given Substrate chain. +pub type BeefyMmrHasherOf = ::MmrHasher; + +/// Hash type, used in MMR construction at the chain with BEEFY support. +pub type BeefyMmrHash = beefy_merkle_tree::Hash; + +/// A way to convert validator id to its raw representation in the BEEFY merkle tree, used by given +/// Substrate chain. +pub type BeefyValidatorIdToMerkleLeafOf = ::ValidatorIdToMerkleLeaf; + +/// Actual type of leafs in the BEEFY MMR. +pub type BeefyMmrLeafOf = + beefy_primitives::mmr::MmrLeaf, HashOf, BeefyMmrHash>; + +/// MMR leaf with unpacked validators set when they're changed. +/// +/// There are two options on how to deal with validator set in the BEEFY client. The first one is +/// when instead of storing public keys of all validators, the commitment is submitted with public +/// validator keys and proof-of-membership for every such key. 
Another one is when we're actually +/// receiving public keys of all validators when validator set changes and are immediately verifying +/// all these keys against validators merkle root. This makes the handoff procedure more heavy, +/// but all subsequent operations on the same set are cheaper. +#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone, TypeInfo)] +pub enum BeefyMmrLeafUnpacked { + /// This variant shall be used when containing MMR leaf is not signalling BEEFY authorities + /// change. + /// + /// The vector is encoded MMR leaf contents (`beefy_primitives::mmr::MmrLeaf`). We can't + /// use it here directly, because leaf structure may change in the future. + Regular(Vec), + /// This variant shall be used when containing MMR leaf is signalling BEEFY authorities change. + /// + /// The vector is encoded MMR leaf contents (`beefy_primitives::mmr::MmrLeaf`). We can't + /// use it here directly, because leaf structure may change in the future. + /// + /// The pallet will reject this variant if MMR leaf is not changing authorities. + Handoff(Vec, Vec), +} + +impl BeefyMmrLeafUnpacked { + /// Returns reference to the actual MMR leaf contents. + pub fn leaf(&self) -> &[u8] { + match *self { + BeefyMmrLeafUnpacked::Regular(ref leaf) => leaf, + BeefyMmrLeafUnpacked::Handoff(ref leaf, _) => leaf, + } + } + + /// Returns reference to the next validator set, if available. + pub fn next_validators(&self) -> Option<&Vec> { + match *self { + BeefyMmrLeafUnpacked::Regular(_) => None, + BeefyMmrLeafUnpacked::Handoff(_, ref next_validators) => Some(next_validators), + } + } + + /// Converts self to unpacked next validator set, if available. + pub fn into_next_validators(self) -> Option> { + match self { + BeefyMmrLeafUnpacked::Regular(_) => None, + BeefyMmrLeafUnpacked::Handoff(_, next_validators) => Some(next_validators), + } + } + + /// Set actual MMR leaf contents. + pub fn set_leaf(self, new_raw_leaf: Vec) -> Self { + match self { + BeefyMmrLeafUnpacked::Regular(_) => BeefyMmrLeafUnpacked::Regular(new_raw_leaf), + BeefyMmrLeafUnpacked::Handoff(_, next_validators) => + BeefyMmrLeafUnpacked::Handoff(new_raw_leaf, next_validators), + } + } + + /// Set next validator set. + pub fn set_next_validators(self, next_validators: Option>) -> Self { + let raw_leaf = match self { + BeefyMmrLeafUnpacked::Regular(raw_leaf) => raw_leaf, + BeefyMmrLeafUnpacked::Handoff(raw_leaf, _) => raw_leaf, + }; + match next_validators { + Some(next_validators) => BeefyMmrLeafUnpacked::Handoff(raw_leaf, next_validators), + None => BeefyMmrLeafUnpacked::Regular(raw_leaf), + } + } +} + +/// Data required for initializing the BEEFY pallet. +#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone, TypeInfo)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct InitializationData { + /// Should the pallet block transaction immediately after initialization. + pub is_halted: bool, + /// Number of the best block, finalized by BEEFY. + pub best_beefy_block_number: BlockNumber, + /// BEEFY validator set that will be finalizing descendants of the `best_beefy_block_number` + /// block. + pub current_validator_set: (ValidatorSetId, Vec), + /// Next BEEFY validator set, that we'll switch to, once we see the handoff header. + pub next_validator_set: (ValidatorSetId, Vec), +} + +/// Basic data, stored by the pallet for every imported commitment. 
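To make the two variants above concrete, here is a small sketch of how `BeefyMmrLeafUnpacked` is meant to be handled (not from the patch; `u64` stands in for the chain's validator id type and the byte vector for an encoded `MmrLeaf`):

use bp_beefy::BeefyMmrLeafUnpacked;

fn leaf_variants_sketch() {
	let raw_leaf: Vec<u8> = vec![0, 1, 2, 3]; // placeholder for an encoded `beefy_primitives::mmr::MmrLeaf`

	let regular = BeefyMmrLeafUnpacked::<u64>::Regular(raw_leaf.clone());
	assert_eq!(regular.leaf(), &raw_leaf[..]);
	assert!(regular.next_validators().is_none());

	// a handoff leaf additionally carries the unpacked next validator set
	let handoff = BeefyMmrLeafUnpacked::<u64>::Handoff(raw_leaf, vec![10, 20, 30]);
	assert_eq!(handoff.next_validators().map(|set| set.len()), Some(3));

	// dropping the validator set turns it back into a regular leaf
	let regular_again = handoff.set_next_validators(None);
	assert!(regular_again.into_next_validators().is_none());
}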
+#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone, TypeInfo)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct ImportedCommitment { + /// Block number and hash of the finalized block parent. + pub parent_number_and_hash: (BlockNumber, BlockHash), + /// MMR root at the imported block. + pub mmr_root: BeefyMmrHash, + /// Parachain heads merkle root at the imported block. + pub parachain_heads: BeefyMmrHash, +} diff --git a/primitives/beefy/src/storage_keys.rs b/primitives/beefy/src/storage_keys.rs new file mode 100644 index 00000000000..0ba1bc74746 --- /dev/null +++ b/primitives/beefy/src/storage_keys.rs @@ -0,0 +1,115 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Storage keys of bridge BEEFY pallet and related pallets. + +/// Name of the `IsHalted` storage value. +pub const IS_HALTED_VALUE_NAME: &str = "IsHalted"; +/// Name of the `BestBlockNumber` storage value. +pub const BEST_BLOCK_NUMBER_VALUE_NAME: &str = "BestBlockNumber"; + +use sp_core::storage::StorageKey; + +/// Storage key of the `IsHalted` flag in the runtime storage. +pub fn is_halted_key(pallet_prefix: &str) -> StorageKey { + StorageKey( + bp_runtime::storage_value_final_key( + pallet_prefix.as_bytes(), + IS_HALTED_VALUE_NAME.as_bytes(), + ) + .to_vec(), + ) +} + +/// Storage key of the `BestBlockNumber` valud in the runtime storage. +pub fn best_block_number_key(pallet_prefix: &str) -> StorageKey { + StorageKey( + bp_runtime::storage_value_final_key( + pallet_prefix.as_bytes(), + BEST_BLOCK_NUMBER_VALUE_NAME.as_bytes(), + ) + .to_vec(), + ) +} + +/// This module provides storage keys of some pallets deployed, at the bridged (source) chain. +pub mod bridged { + use super::*; + + // There's `BeefyApi` right now, but it is not providing any methods to access the next + // validator set. So we're using storage reads instead. + + // TODO: tests + + /// Name of the `ValidatorSetId` storage value. + const VALIDATOR_SET_ID_VALUE_NAME: &str = "ValidatorSetId"; + /// Name of the `Authorities` storage value. + const AUTHORITIES_VALUE_NAME: &str = "Authorities"; + /// Name of the `ValidatorSetId` storage value. + const NEXT_AUTHORITIES_VALUE_NAME: &str = "NextAuthorities"; + + /// Storage key of the `ValidatorSetId` runtime storage value from the `pallet-beefy`. + pub fn valdiator_set_id_storage_key(pallet_prefix: &str) -> StorageKey { + StorageKey( + bp_runtime::storage_value_final_key( + pallet_prefix.as_bytes(), + VALIDATOR_SET_ID_VALUE_NAME.as_bytes(), + ) + .to_vec(), + ) + } + + /// Storage key of the `Authorities` runtime storage value from the `pallet-beefy`. 
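`bp_runtime::storage_value_final_key` is not part of this patch; assuming it follows the standard FRAME layout for a `StorageValue` (`twox128(pallet_prefix) ++ twox128(value_name)`), the keys above could equivalently be computed as in this sketch (the helper name is made up):

use sp_core::{hashing::twox_128, storage::StorageKey};

fn storage_value_key_sketch(pallet_prefix: &str, value_name: &str) -> StorageKey {
	// final key of a FRAME `StorageValue`: twox128(pallet) ++ twox128(name), 32 bytes total
	let mut key = Vec::with_capacity(32);
	key.extend_from_slice(&twox_128(pallet_prefix.as_bytes()));
	key.extend_from_slice(&twox_128(value_name.as_bytes()));
	StorageKey(key)
}

// e.g. `storage_value_key_sketch("BridgeBeefy", "IsHalted")` should reproduce the hex
// expected by the `is_halted_key_computed_properly` test below.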
+ pub fn validators_storage_key(pallet_prefix: &str) -> StorageKey { + StorageKey( + bp_runtime::storage_value_final_key( + pallet_prefix.as_bytes(), + AUTHORITIES_VALUE_NAME.as_bytes(), + ) + .to_vec(), + ) + } + + /// Storage key of the `NextAuthorities` runtime storage value from the `pallet-beefy`. + pub fn next_validators_storage_key(pallet_prefix: &str) -> StorageKey { + StorageKey( + bp_runtime::storage_value_final_key( + pallet_prefix.as_bytes(), + NEXT_AUTHORITIES_VALUE_NAME.as_bytes(), + ) + .to_vec(), + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + fn is_halted_key_computed_properly() { + // If this test fails, then something has been changed in module storage that is breaking + // compatibility with previous pallet. + let storage_key = is_halted_key("BridgeBeefy").0; + assert_eq!( + storage_key, + hex!("0b06f475eddb98cf933a12262e0388de9611a984bbd04e2fd39f97bbc006115f").to_vec(), + "Unexpected storage key: {}", + hex::encode(&storage_key), + ); + } +} diff --git a/primitives/chain-millau/Cargo.toml b/primitives/chain-millau/Cargo.toml index 0aaeb5b6bf9..25c487e5f1c 100644 --- a/primitives/chain-millau/Cargo.toml +++ b/primitives/chain-millau/Cargo.toml @@ -10,6 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" # Bridge Dependencies +bp-beefy = { path = "../beefy", default-features = false } bp-messages = { path = "../messages", default-features = false } bp-runtime = { path = "../runtime", default-features = false } fixed-hash = { version = "0.7.0", default-features = false } @@ -34,6 +35,7 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", [features] default = ["std"] std = [ + "bp-beefy/std", "bp-messages/std", "bp-runtime/std", "fixed-hash/std", diff --git a/primitives/chain-millau/src/lib.rs b/primitives/chain-millau/src/lib.rs index ff8d5385953..371aecbe839 100644 --- a/primitives/chain-millau/src/lib.rs +++ b/primitives/chain-millau/src/lib.rs @@ -20,6 +20,7 @@ mod millau_hash; +use bp_beefy::ChainWithBeefy; use bp_messages::{LaneId, MessageDetails, MessageNonce}; use bp_runtime::Chain; use frame_support::{ @@ -183,6 +184,13 @@ impl Chain for Millau { } } +impl ChainWithBeefy for Millau { + type CommitmentHasher = sp_runtime::traits::Keccak256; + type MmrHasher = bp_beefy::BeefyKeccak256; + type ValidatorId = bp_beefy::EcdsaValidatorId; + type ValidatorIdToMerkleLeaf = bp_beefy::BeefyEcdsaToEthereum; +} + /// Millau Hasher (Blake2-256 ++ Keccak-256) implementation. #[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -261,10 +269,14 @@ frame_support::parameter_types! { .build_or_panic(); } +/// Name of the `pallet-beefy` instance that is deployed at Millau. +pub const AT_MILLAU_BEEFY_PALLET_NAME: &'static str = "Beefy"; /// Name of the With-Millau GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_MILLAU_GRANDPA_PALLET_NAME: &str = "BridgeMillauGrandpa"; /// Name of the With-Millau messages pallet instance that is deployed at bridged chains. pub const WITH_MILLAU_MESSAGES_PALLET_NAME: &str = "BridgeMillauMessages"; +/// Name of the With-Millau BEEFY pallet instance that is deployed at bridged chains. +pub const WITH_MILLAU_BEEFY_PALLET_NAME: &'static str = "BridgeMillauBeefy"; /// Name of the Rialto->Millau (actually DOT->KSM) conversion rate stored in the Millau runtime. 
pub const RIALTO_TO_MILLAU_CONVERSION_RATE_PARAMETER_NAME: &str = "RialtoToMillauConversionRate"; diff --git a/relays/bin-substrate/Cargo.toml b/relays/bin-substrate/Cargo.toml index fb8ff467d04..e3d1da6a2d6 100644 --- a/relays/bin-substrate/Cargo.toml +++ b/relays/bin-substrate/Cargo.toml @@ -22,6 +22,7 @@ strum = { version = "0.21.0", features = ["derive"] } # Bridge dependencies +bp-beefy = { path = "../../primitives/beefy" } bp-header-chain = { path = "../../primitives/header-chain" } bp-kusama = { path = "../../primitives/chain-kusama" } bp-messages = { path = "../../primitives/messages" } diff --git a/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs b/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs index 0c0ba2272c7..bf7003b6b7f 100644 --- a/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs +++ b/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs @@ -18,7 +18,7 @@ use async_trait::async_trait; use relay_polkadot_client::Polkadot; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; +use substrate_relay_helper::{finality::SubstrateFinalitySyncPipeline, TransactionParams}; /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat /// relay as gone wild. @@ -47,6 +47,7 @@ impl SubstrateFinalitySyncPipeline for KusamaFinalityToPolkadot { type SourceChain = relay_kusama_client::Kusama; type TargetChain = Polkadot; + type FinalityEngine = substrate_relay_helper::finality::engine::Grandpa; type SubmitFinalityProofCallBuilder = KusamaFinalityToPolkadotCallBuilder; type TransactionSignScheme = Polkadot; diff --git a/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs b/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs index 584f0a9bb1d..dc8b6f58817 100644 --- a/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs +++ b/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs @@ -16,11 +16,12 @@ //! Millau-to-Rialto headers sync entrypoint. -use substrate_relay_helper::finality_pipeline::{ - DirectSubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, +use substrate_relay_helper::finality::{ + DirectSubmitBeefyFinalityProofCallBuilder, DirectSubmitGrandpaFinalityProofCallBuilder, + SubstrateFinalitySyncPipeline, }; -/// Description of Millau -> Rialto finalized headers bridge. +/// Description of Millau -> Rialto GRANDPA-finalized headers bridge. #[derive(Clone, Debug)] pub struct MillauFinalityToRialto; @@ -28,7 +29,25 @@ impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto { type SourceChain = relay_millau_client::Millau; type TargetChain = relay_rialto_client::Rialto; - type SubmitFinalityProofCallBuilder = DirectSubmitFinalityProofCallBuilder< + type FinalityEngine = substrate_relay_helper::finality::engine::Grandpa; + type SubmitFinalityProofCallBuilder = DirectSubmitGrandpaFinalityProofCallBuilder< + Self, + rialto_runtime::Runtime, + rialto_runtime::MillauGrandpaInstance, + >; + type TransactionSignScheme = relay_rialto_client::Rialto; +} + +/// Description of Millau -> Rialto BEEFY-finalized headers bridge. 
+#[derive(Clone, Debug)] +pub struct MillauBeefyFinalityToRialto; + +impl SubstrateFinalitySyncPipeline for MillauBeefyFinalityToRialto { + type SourceChain = relay_millau_client::Millau; + type TargetChain = relay_rialto_client::Rialto; + + type FinalityEngine = substrate_relay_helper::finality::engine::Beefy; + type SubmitFinalityProofCallBuilder = DirectSubmitBeefyFinalityProofCallBuilder< Self, rialto_runtime::Runtime, rialto_runtime::MillauGrandpaInstance, diff --git a/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs b/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs index 6d118b07caa..704419affbf 100644 --- a/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs +++ b/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs @@ -18,7 +18,7 @@ use async_trait::async_trait; use relay_kusama_client::Kusama; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; +use substrate_relay_helper::{finality::SubstrateFinalitySyncPipeline, TransactionParams}; /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat /// relay as gone wild. @@ -47,6 +47,7 @@ impl SubstrateFinalitySyncPipeline for PolkadotFinalityToKusama { type SourceChain = relay_polkadot_client::Polkadot; type TargetChain = Kusama; + type FinalityEngine = substrate_relay_helper::finality::engine::Grandpa; type SubmitFinalityProofCallBuilder = PolkadotFinalityToKusamaCallBuilder; type TransactionSignScheme = Kusama; diff --git a/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs b/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs index a433f3562a7..38d8dc4c9d2 100644 --- a/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs +++ b/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs @@ -16,8 +16,8 @@ //! Rialto-to-Millau headers sync entrypoint. -use substrate_relay_helper::finality_pipeline::{ - DirectSubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, +use substrate_relay_helper::finality::{ + DirectSubmitGrandpaFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, }; /// Description of Millau -> Rialto finalized headers bridge. @@ -28,7 +28,8 @@ impl SubstrateFinalitySyncPipeline for RialtoFinalityToMillau { type SourceChain = relay_rialto_client::Rialto; type TargetChain = relay_millau_client::Millau; - type SubmitFinalityProofCallBuilder = DirectSubmitFinalityProofCallBuilder< + type FinalityEngine = substrate_relay_helper::finality::engine::Grandpa; + type SubmitFinalityProofCallBuilder = DirectSubmitGrandpaFinalityProofCallBuilder< Self, millau_runtime::Runtime, millau_runtime::RialtoGrandpaInstance, diff --git a/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs b/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs index bb66a7422d3..c28febc660c 100644 --- a/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs +++ b/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs @@ -20,7 +20,7 @@ use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY; use async_trait::async_trait; use relay_wococo_client::Wococo; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; +use substrate_relay_helper::{finality::SubstrateFinalitySyncPipeline, TransactionParams}; /// Description of Rococo -> Wococo finalized headers bridge. 
#[derive(Clone, Debug)] @@ -37,6 +37,7 @@ impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo { type SourceChain = relay_rococo_client::Rococo; type TargetChain = Wococo; + type FinalityEngine = substrate_relay_helper::finality::engine::Grandpa; type SubmitFinalityProofCallBuilder = RococoFinalityToWococoCallBuilder; type TransactionSignScheme = Wococo; diff --git a/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/relays/bin-substrate/src/chains/westend_headers_to_millau.rs index 2ec20a027ff..036947f4ee1 100644 --- a/relays/bin-substrate/src/chains/westend_headers_to_millau.rs +++ b/relays/bin-substrate/src/chains/westend_headers_to_millau.rs @@ -16,8 +16,8 @@ //! Westend-to-Millau headers sync entrypoint. -use substrate_relay_helper::finality_pipeline::{ - DirectSubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, +use substrate_relay_helper::finality::{ + DirectSubmitGrandpaFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, }; /// Description of Westend -> Millau finalized headers bridge. @@ -28,7 +28,8 @@ impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau { type SourceChain = relay_westend_client::Westend; type TargetChain = relay_millau_client::Millau; - type SubmitFinalityProofCallBuilder = DirectSubmitFinalityProofCallBuilder< + type FinalityEngine = substrate_relay_helper::finality::engine::Grandpa; + type SubmitFinalityProofCallBuilder = DirectSubmitGrandpaFinalityProofCallBuilder< Self, millau_runtime::Runtime, millau_runtime::WestendGrandpaInstance, diff --git a/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs b/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs index a7bff595188..d58bbdc15c2 100644 --- a/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs +++ b/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs @@ -18,7 +18,7 @@ use async_trait::async_trait; use relay_rococo_client::Rococo; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; +use substrate_relay_helper::{finality::SubstrateFinalitySyncPipeline, TransactionParams}; /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat /// relay as gone wild. @@ -42,6 +42,7 @@ impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo { type SourceChain = relay_wococo_client::Wococo; type TargetChain = Rococo; + type FinalityEngine = substrate_relay_helper::finality::engine::Grandpa; type SubmitFinalityProofCallBuilder = WococoFinalityToRococoCallBuilder; type TransactionSignScheme = Rococo; diff --git a/relays/bin-substrate/src/cli/init_bridge.rs b/relays/bin-substrate/src/cli/init_bridge.rs index a0129ce9baa..d2644c57049 100644 --- a/relays/bin-substrate/src/cli/init_bridge.rs +++ b/relays/bin-substrate/src/cli/init_bridge.rs @@ -18,7 +18,9 @@ use crate::cli::{SourceConnectionParams, TargetConnectionParams, TargetSigningPa use bp_header_chain::InitializationData; use bp_runtime::Chain as ChainBase; use codec::Encode; -use relay_substrate_client::{Chain, SignParam, TransactionSignScheme, UnsignedTransaction}; +use relay_substrate_client::{ + BlockNumberOf, Chain, SignParam, TransactionSignScheme, UnsignedTransaction, +}; use sp_core::{Bytes, Pair}; use structopt::StructOpt; use strum::{EnumString, EnumVariantNames, VariantNames}; @@ -43,6 +45,7 @@ pub struct InitBridge { pub enum InitBridgeName { MillauToRialto, RialtoToMillau, + MillauBeefyToRialto, WestendToMillau, RococoToWococo, WococoToRococo, @@ -56,6 +59,7 @@ macro_rules! 
select_bridge { InitBridgeName::MillauToRialto => { type Source = relay_millau_client::Millau; type Target = relay_rialto_client::Rialto; + type Engine = substrate_relay_helper::finality::engine::Grandpa; fn encode_init_bridge( init_data: InitializationData<::Header>, @@ -74,6 +78,7 @@ macro_rules! select_bridge { InitBridgeName::RialtoToMillau => { type Source = relay_rialto_client::Rialto; type Target = relay_millau_client::Millau; + type Engine = substrate_relay_helper::finality::engine::Grandpa; fn encode_init_bridge( init_data: InitializationData<::Header>, @@ -89,9 +94,32 @@ macro_rules! select_bridge { $generic }, + InitBridgeName::MillauBeefyToRialto => { + type Source = relay_millau_client::Millau; + type Target = relay_rialto_client::Rialto; + type Engine = substrate_relay_helper::finality::engine::Beefy; + + fn encode_init_bridge( + init_data: bp_beefy::InitializationData< + BlockNumberOf, + bp_beefy::BeefyValidatorIdOf, + >, + ) -> ::Call { + let initialize_call = rialto_runtime::BridgeBeefyCall::< + rialto_runtime::Runtime, + rialto_runtime::MillauGrandpaInstance, + >::initialize { + init_data, + }; + rialto_runtime::SudoCall::sudo { call: Box::new(initialize_call.into()) }.into() + } + + $generic + }, InitBridgeName::WestendToMillau => { type Source = relay_westend_client::Westend; type Target = relay_millau_client::Millau; + type Engine = substrate_relay_helper::finality::engine::Grandpa; fn encode_init_bridge( init_data: InitializationData<::Header>, @@ -114,6 +142,7 @@ macro_rules! select_bridge { InitBridgeName::RococoToWococo => { type Source = relay_rococo_client::Rococo; type Target = relay_wococo_client::Wococo; + type Engine = substrate_relay_helper::finality::engine::Grandpa; fn encode_init_bridge( init_data: InitializationData<::Header>, @@ -130,6 +159,7 @@ macro_rules! select_bridge { InitBridgeName::WococoToRococo => { type Source = relay_wococo_client::Wococo; type Target = relay_rococo_client::Rococo; + type Engine = substrate_relay_helper::finality::engine::Grandpa; fn encode_init_bridge( init_data: InitializationData<::Header>, @@ -146,6 +176,7 @@ macro_rules! select_bridge { InitBridgeName::KusamaToPolkadot => { type Source = relay_kusama_client::Kusama; type Target = relay_polkadot_client::Polkadot; + type Engine = substrate_relay_helper::finality::engine::Grandpa; fn encode_init_bridge( init_data: InitializationData<::Header>, @@ -162,6 +193,7 @@ macro_rules! 
select_bridge { InitBridgeName::PolkadotToKusama => { type Source = relay_polkadot_client::Polkadot; type Target = relay_kusama_client::Kusama; + type Engine = substrate_relay_helper::finality::engine::Grandpa; fn encode_init_bridge( init_data: InitializationData<::Header>, @@ -189,7 +221,7 @@ impl InitBridge { let (spec_version, transaction_version) = target_client.simple_runtime_version().await?; - substrate_relay_helper::headers_initialize::initialize( + substrate_relay_helper::finality::initialize::initialize::( source_client, target_client.clone(), target_sign.public().into(), diff --git a/relays/bin-substrate/src/cli/reinit_bridge.rs b/relays/bin-substrate/src/cli/reinit_bridge.rs index a6897aaf0ab..a20690fb5ef 100644 --- a/relays/bin-substrate/src/cli/reinit_bridge.rs +++ b/relays/bin-substrate/src/cli/reinit_bridge.rs @@ -40,8 +40,12 @@ use std::convert::{TryFrom, TryInto}; use structopt::StructOpt; use strum::{EnumString, EnumVariantNames, VariantNames}; use substrate_relay_helper::{ - finality_pipeline::SubstrateFinalitySyncPipeline, finality_source::SubstrateFinalitySource, - finality_target::SubstrateFinalityTarget, messages_source::read_client_state, + finality::{ + source::{SubstrateFinalityProof, SubstrateFinalitySource}, + target::SubstrateFinalityTarget, + SubstrateFinalitySyncPipeline, + }, + messages_source::read_client_state, TransactionParams, }; @@ -299,7 +303,7 @@ impl ReinitBridge { /// Mandatory header and its finality proof. type HeaderAndProof

<P> = (
	SyncHeader<HeaderOf<<P as SubstrateFinalitySyncPipeline>::SourceChain>>,
-	GrandpaJustification<HeaderOf<<P as SubstrateFinalitySyncPipeline>::SourceChain>>,
+	SubstrateFinalityProof<P>,
);
/// Vector of mandatory headers and their finality proofs.
type HeadersAndProofs<P>
= Vec>; diff --git a/relays/bin-substrate/src/cli/relay_headers.rs b/relays/bin-substrate/src/cli/relay_headers.rs index 45034aba4b5..d01eab6c9ea 100644 --- a/relays/bin-substrate/src/cli/relay_headers.rs +++ b/relays/bin-substrate/src/cli/relay_headers.rs @@ -18,7 +18,7 @@ use structopt::StructOpt; use strum::{EnumString, EnumVariantNames, VariantNames}; use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; -use substrate_relay_helper::finality_pipeline::SubstrateFinalitySyncPipeline; +use substrate_relay_helper::finality::SubstrateFinalitySyncPipeline; use crate::cli::{ PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams, @@ -50,6 +50,7 @@ pub struct RelayHeaders { pub enum RelayHeadersBridge { MillauToRialto, RialtoToMillau, + MillauBeefyToRialto, WestendToMillau, RococoToWococo, WococoToRococo, @@ -74,6 +75,14 @@ macro_rules! select_bridge { $generic }, + RelayHeadersBridge::MillauBeefyToRialto => { + type Source = relay_millau_client::Millau; + type Target = relay_rialto_client::Rialto; + type Finality = + crate::chains::millau_headers_to_rialto::MillauBeefyFinalityToRialto; + + $generic + }, RelayHeadersBridge::WestendToMillau => { type Source = relay_westend_client::Westend; type Target = relay_millau_client::Millau; @@ -136,7 +145,7 @@ impl RelayHeaders { ) .await?; - substrate_relay_helper::finality_pipeline::run::( + substrate_relay_helper::finality::run::( source_client, target_client, self.only_mandatory_headers, diff --git a/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/relays/bin-substrate/src/cli/relay_headers_and_messages.rs index d071d1f8910..537ae1e1018 100644 --- a/relays/bin-substrate/src/cli/relay_headers_and_messages.rs +++ b/relays/bin-substrate/src/cli/relay_headers_and_messages.rs @@ -35,7 +35,7 @@ use relay_substrate_client::{ use relay_utils::metrics::MetricsParams; use sp_core::{Bytes, Pair}; use substrate_relay_helper::{ - finality_pipeline::SubstrateFinalitySyncPipeline, messages_lane::MessagesRelayParams, + finality::SubstrateFinalitySyncPipeline, messages_lane::MessagesRelayParams, on_demand_headers::OnDemandHeadersRelay, TransactionParams, }; diff --git a/relays/client-millau/Cargo.toml b/relays/client-millau/Cargo.toml index 98932433455..26d16ea7cbc 100644 --- a/relays/client-millau/Cargo.toml +++ b/relays/client-millau/Cargo.toml @@ -12,6 +12,7 @@ relay-utils = { path = "../utils" } # Supported Chains +bp-beefy = { path = "../../primitives/beefy" } bp-messages = { path = "../../primitives/messages" } bp-millau = { path = "../../primitives/chain-millau" } millau-runtime = { path = "../../bin/millau/runtime" } diff --git a/relays/client-millau/src/lib.rs b/relays/client-millau/src/lib.rs index eae9d9b4586..d109f6e96e3 100644 --- a/relays/client-millau/src/lib.rs +++ b/relays/client-millau/src/lib.rs @@ -20,8 +20,9 @@ use bp_messages::MessageNonce; use codec::{Compact, Decode, Encode}; use frame_support::weights::Weight; use relay_substrate_client::{ - BalanceOf, Chain, ChainBase, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, - Error as SubstrateError, IndexOf, SignParam, TransactionSignScheme, UnsignedTransaction, + BalanceOf, Chain, ChainBase, ChainWithBalances, ChainWithBeefy, ChainWithGrandpa, + ChainWithMessages, Error as SubstrateError, IndexOf, SignParam, TransactionSignScheme, + UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; @@ -58,6 +59,19 @@ impl ChainWithGrandpa for Millau { const 
WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = bp_millau::WITH_MILLAU_GRANDPA_PALLET_NAME; } +impl bp_beefy::ChainWithBeefy for Millau { + type CommitmentHasher = ::CommitmentHasher; + type MmrHasher = ::MmrHasher; + type ValidatorId = ::ValidatorId; + type ValidatorIdToMerkleLeaf = + ::ValidatorIdToMerkleLeaf; +} + +impl ChainWithBeefy for Millau { + const AT_CHAIN_BEEFY_PALLET_NAME: &'static str = bp_millau::AT_MILLAU_BEEFY_PALLET_NAME; + const WITH_CHAIN_BEEFY_PALLET_NAME: &'static str = bp_millau::WITH_MILLAU_BEEFY_PALLET_NAME; +} + impl ChainWithMessages for Millau { const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = bp_millau::WITH_MILLAU_MESSAGES_PALLET_NAME; diff --git a/relays/client-substrate/Cargo.toml b/relays/client-substrate/Cargo.toml index dad864965e2..5cd94d1cf83 100644 --- a/relays/client-substrate/Cargo.toml +++ b/relays/client-substrate/Cargo.toml @@ -19,6 +19,7 @@ thiserror = "1.0.26" # Bridge dependencies +bp-beefy = { path = "../../primitives/beefy" } bp-header-chain = { path = "../../primitives/header-chain" } bp-messages = { path = "../../primitives/messages" } bp-runtime = { path = "../../primitives/runtime" } diff --git a/relays/client-substrate/src/chain.rs b/relays/client-substrate/src/chain.rs index a55fa64d850..3cab250a256 100644 --- a/relays/client-substrate/src/chain.rs +++ b/relays/client-substrate/src/chain.rs @@ -78,6 +78,19 @@ pub trait ChainWithGrandpa: Chain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str; } +/// Substrate based chain that is using BEEFY finality from minimal relay-client point of view. +pub trait ChainWithBeefy: Chain + bp_beefy::ChainWithBeefy { + /// Name of the original `pallet-beefy` at this chain (used in `construct_runtime` macro call). + const AT_CHAIN_BEEFY_PALLET_NAME: &'static str; + + /// Name of the bridge BEEFY pallet (used in `construct_runtime` macro call) that is deployed + /// at some other chain to bridge with this `ChainWithBeefy`. + /// + /// We assume that all chains that are bridging with this `ChainWithBeefy` are using + /// the same name. + const WITH_CHAIN_BEEFY_PALLET_NAME: &'static str; +} + /// Substrate-based chain with messaging support from minimal relay-client point of view. pub trait ChainWithMessages: Chain { /// Name of the bridge messages pallet (used in `construct_runtime` macro call) that is deployed diff --git a/relays/client-substrate/src/client.rs b/relays/client-substrate/src/client.rs index 1e48bc33396..1af89ca3b3d 100644 --- a/relays/client-substrate/src/client.rs +++ b/relays/client-substrate/src/client.rs @@ -262,6 +262,22 @@ impl Client { Ok(*self.header_by_hash(self.best_finalized_header_hash().await?).await?.number()) } + /// Return hash of the best block, finalized by BEEFY. + pub async fn best_finalized_beefy_hash(&self) -> Result { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateClient::< + AccountIdOf, + BlockNumberOf, + HashOf, + HeaderOf, + IndexOf, + C::SignedBlock, + >::beefy_get_finalized_head(&*client) + .await?) + }) + .await + } + /// Returns the best Substrate header. pub async fn best_header(&self) -> Result where @@ -704,8 +720,8 @@ impl Client { .await } - /// Return new justifications stream. - pub async fn subscribe_justifications(&self) -> Result> { + /// Return new GRANDPA justifications stream. 
+ pub async fn subscribe_grandpa_justifications(&self) -> Result> { let subscription = self .jsonrpsee_execute(move |client| async move { Ok(client @@ -720,7 +736,30 @@ impl Client { let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); self.tokio.spawn(Subscription::background_worker( C::NAME.into(), - "justification".into(), + "grandpa-justification".into(), + subscription, + sender, + )); + Ok(Subscription(Mutex::new(receiver))) + } + + /// Return new BEEFY justifications stream. + pub async fn subscribe_beefy_justifications(&self) -> Result> { + let subscription = self + .jsonrpsee_execute(move |client| async move { + Ok(client + .subscribe( + "beefy_subscribeJustifications", + None, + "beefy_unsubscribeJustifications", + ) + .await?) + }) + .await?; + let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); + self.tokio.spawn(Subscription::background_worker( + C::NAME.into(), + "beefy-justification".into(), subscription, sender, )); diff --git a/relays/client-substrate/src/error.rs b/relays/client-substrate/src/error.rs index e698f2596c5..797af5cc5d2 100644 --- a/relays/client-substrate/src/error.rs +++ b/relays/client-substrate/src/error.rs @@ -45,9 +45,9 @@ pub enum Error { /// Account does not exist on the chain. #[error("Account does not exist on the chain.")] AccountDoesNotExist, - /// Runtime storage is missing mandatory ":code:" entry. - #[error("Mandatory :code: entry is missing from runtime storage.")] - MissingMandatoryCodeEntry, + /// Runtime storage is missing some mandatory value. + #[error("Mandatory storage value is missing from the runtime storage.")] + MissingMandatoryStorageValue, /// The client we're connected to is not synced, so we can't rely on its state. #[error("Substrate client is not synced {0}.")] ClientNotSynced(Health), diff --git a/relays/client-substrate/src/lib.rs b/relays/client-substrate/src/lib.rs index b3a7ec41419..4c5c8390ad2 100644 --- a/relays/client-substrate/src/lib.rs +++ b/relays/client-substrate/src/lib.rs @@ -31,7 +31,7 @@ use std::time::Duration; pub use crate::{ chain::{ - AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, + AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, ChainWithBeefy, ChainWithGrandpa, ChainWithMessages, SignParam, TransactionSignScheme, TransactionStatusOf, UnsignedTransaction, WeightToFeeOf, }, diff --git a/relays/client-substrate/src/metrics/storage_proof_overhead.rs b/relays/client-substrate/src/metrics/storage_proof_overhead.rs index f1c770ed228..42793fe7c54 100644 --- a/relays/client-substrate/src/metrics/storage_proof_overhead.rs +++ b/relays/client-substrate/src/metrics/storage_proof_overhead.rs @@ -72,7 +72,7 @@ impl StorageProofOverheadMetric { let maybe_encoded_storage_value = storage_value_reader.read_value(CODE).map_err(Error::StorageProofError)?; let encoded_storage_value_size = - maybe_encoded_storage_value.ok_or(Error::MissingMandatoryCodeEntry)?.len(); + maybe_encoded_storage_value.ok_or(Error::MissingMandatoryStorageValue)?.len(); Ok(storage_proof_size - encoded_storage_value_size) } diff --git a/relays/client-substrate/src/rpc.rs b/relays/client-substrate/src/rpc.rs index a0172d1e550..2f7b35d2a85 100644 --- a/relays/client-substrate/src/rpc.rs +++ b/relays/client-substrate/src/rpc.rs @@ -36,6 +36,8 @@ pub(crate) trait Substrate) -> RpcResult

; #[method(name = "chain_getFinalizedHead", param_kind = array)] async fn chain_get_finalized_head(&self) -> RpcResult; + #[method(name = "beefy_getFinalizedHead", param_kind = array)] + async fn beefy_get_finalized_head(&self) -> RpcResult; #[method(name = "chain_getBlock", param_kind = array)] async fn chain_get_block(&self, block_hash: Option) -> RpcResult; #[method(name = "chain_getBlockHash", param_kind = array)] diff --git a/relays/lib-substrate-relay/Cargo.toml b/relays/lib-substrate-relay/Cargo.toml index e2cabf52f44..1d13e31ded1 100644 --- a/relays/lib-substrate-relay/Cargo.toml +++ b/relays/lib-substrate-relay/Cargo.toml @@ -17,6 +17,7 @@ log = "0.4.14" # Bridge dependencies +bp-beefy = { path = "../../primitives/beefy" } bp-header-chain = { path = "../../primitives/header-chain" } bridge-runtime-common = { path = "../../bin/runtime-common" } @@ -26,6 +27,7 @@ relay-utils = { path = "../utils" } messages-relay = { path = "../messages" } relay-substrate-client = { path = "../client-substrate" } +pallet-bridge-beefy = { path = "../../modules/beefy" } pallet-bridge-grandpa = { path = "../../modules/grandpa" } pallet-bridge-messages = { path = "../../modules/messages" } diff --git a/relays/lib-substrate-relay/src/error.rs b/relays/lib-substrate-relay/src/error.rs index 9402d55e379..758738ee4f4 100644 --- a/relays/lib-substrate-relay/src/error.rs +++ b/relays/lib-substrate-relay/src/error.rs @@ -46,9 +46,17 @@ pub enum Error { /// Failed to guess initial GRANDPA authorities at the given header of the source chain. #[error("Failed to guess initial {0} GRANDPA authorities set id: checked all possible ids in range [0; {1}]")] GuessInitialAuthorities(&'static str, HeaderNumber), - /// Failed to retrieve GRANDPA authorities at the given header from the source chain. - #[error("Failed to retrive {0} GRANDPA authorities set at header {1}: {2:?}")] + /// Failed to retrieve GRANDPA/BEEFY authorities at the given header from the source chain. + #[error("Failed to retrieve {0} authorities set at header {1}: {2:?}")] RetrieveAuthorities(&'static str, Hash, client::Error), + /// Failed to retrieve GRANDPA/BEEFY next authorities at the given header from the source + /// chain. + #[error("Failed to retrieve {0} next authorities set at header {1}: {2:?}")] + RetrieveNextAuthorities(&'static str, Hash, client::Error), + /// Failed to retrieve GRANDPA/BEEFY authorities set id at the given header from the source + /// chain. + #[error("Failed to retrieve {0} authorities set id at header {1}: {2:?}")] + RetrieveAuthoritiesSetId(&'static str, Hash, client::Error), /// Failed to decode GRANDPA authorities at the given header of the source chain. #[error("Failed to decode {0} GRANDPA authorities set at header {1}: {2:?}")] DecodeAuthorities(&'static str, Hash, codec::Error), diff --git a/relays/lib-substrate-relay/src/finality/engine.rs b/relays/lib-substrate-relay/src/finality/engine.rs new file mode 100644 index 00000000000..49f8d6af95e --- /dev/null +++ b/relays/lib-substrate-relay/src/finality/engine.rs @@ -0,0 +1,344 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version.
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Support of different finality engines, available in Substrate. + +use crate::error::Error; +use async_trait::async_trait; +use bp_beefy::{BeefyMmrLeafUnpackedOf, BeefySignedCommitmentOf, BeefyValidatorIdOf}; +use bp_header_chain::{ + find_grandpa_authorities_scheduled_change, + justification::{verify_justification, GrandpaJustification}, + FinalityProof, +}; +use codec::{Decode, Encode}; +use finality_grandpa::voter_set::VoterSet; +use num_traits::{One, Zero}; +use relay_substrate_client::{ + BlockNumberOf, Chain, ChainWithBeefy, ChainWithGrandpa, Client, Error as SubstrateError, + HashOf, HeaderOf, Subscription, +}; +use sp_core::{storage::StorageKey, Bytes}; +use sp_finality_grandpa::AuthorityList as GrandpaAuthoritiesSet; +use sp_runtime::{traits::Header, ConsensusEngineId}; +use std::marker::PhantomData; + +/// Finality engine, used by the Substrate chain. +#[async_trait] +pub trait Engine { + /// Unique consensus engine identifier. + const ID: ConsensusEngineId; + /// Type of finality proofs, used by consensus engine. + type FinalityProof: FinalityProof> + Decode + Encode; + /// Type of bridge pallet initialization data. + type InitializationData: std::fmt::Debug + Send + Sync + 'static; + + /// Returns storage key at the bridged (target) chain that corresponds to the `bool` value, + /// which is true when the bridge pallet is halted. + fn is_halted_key() -> StorageKey; + /// Returns storage key at the bridged (target) chain that corresponds to some value that is + /// missing from the storage until bridge pallet is initialized. + /// + /// Note that we don't care about the type of the value - just if it is present or not. + fn is_initialized_key() -> StorageKey; + /// A method to subscribe to encoded finality proofs, given source client. + async fn finality_proofs(client: Client) -> Result, SubstrateError>; + /// Prepare initialization data for the finality bridge pallet. + async fn prepare_initialization_data( + client: Client, + ) -> Result, BlockNumberOf>>; +} + +/// GRANDPA finality engine. +pub struct Grandpa(PhantomData); + +impl Grandpa { + /// Read header by hash from the source client. + async fn source_header( + source_client: &Client, + header_hash: C::Hash, + ) -> Result, BlockNumberOf>> { + source_client + .header_by_hash(header_hash) + .await + .map_err(|err| Error::RetrieveHeader(C::NAME, header_hash, err)) + } + + /// Read GRANDPA authorities set at given header.
+ async fn source_authorities_set( + source_client: &Client, + header_hash: C::Hash, + ) -> Result, BlockNumberOf>> { + let raw_authorities_set = source_client + .grandpa_authorities_set(header_hash) + .await + .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err))?; + GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) + .map_err(|err| Error::DecodeAuthorities(C::NAME, header_hash, err)) + } +} + +#[async_trait] +impl Engine for Grandpa { + const ID: ConsensusEngineId = sp_finality_grandpa::GRANDPA_ENGINE_ID; + type FinalityProof = GrandpaJustification>; + type InitializationData = bp_header_chain::InitializationData; + + fn is_halted_key() -> StorageKey { + bp_header_chain::storage_keys::is_halted_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) + } + + fn is_initialized_key() -> StorageKey { + bp_header_chain::storage_keys::best_finalized_hash_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) + } + + async fn finality_proofs(client: Client) -> Result, SubstrateError> { + client.subscribe_grandpa_justifications().await + } + + /// Prepare initialization data for the GRANDPA verifier pallet. + async fn prepare_initialization_data( + source_client: Client, + ) -> Result, BlockNumberOf>> { + // In ideal world we just need to get best finalized header and then to read GRANDPA + // authorities set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at + // this header. + // + // But now there are problems with this approach - `CurrentSetId` may return invalid value. + // So here we're waiting for the next justification, read the authorities set and then try + // to figure out the set id with bruteforce. + let justifications = source_client + .subscribe_grandpa_justifications() + .await + .map_err(|err| Error::Subscribe(C::NAME, err))?; + // Read next justification - the header that it finalizes will be used as initial header. + let justification = justifications + .next() + .await + .map_err(|e| Error::ReadJustification(C::NAME, e)) + .and_then(|justification| { + justification.ok_or(Error::ReadJustificationStreamEnded(C::NAME)) + })?; + + // Read initial header. + let justification: GrandpaJustification = + Decode::decode(&mut &justification.0[..]) + .map_err(|err| Error::DecodeJustification(C::NAME, err))?; + + let (initial_header_hash, initial_header_number) = + (justification.commit.target_hash, justification.commit.target_number); + + let initial_header = Self::source_header(&source_client, initial_header_hash).await?; + log::trace!(target: "bridge", "Selected {} initial header: {}/{}", + C::NAME, + initial_header_number, + initial_header_hash, + ); + + // Read GRANDPA authorities set at initial header. + let initial_authorities_set = + Self::source_authorities_set(&source_client, initial_header_hash).await?; + log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}", + C::NAME, + initial_authorities_set, + ); + + // If initial header changes the GRANDPA authorities set, then we need previous authorities + // to verify justification. + let mut authorities_for_verification = initial_authorities_set.clone(); + let scheduled_change = find_grandpa_authorities_scheduled_change(&initial_header); + assert!( + scheduled_change.as_ref().map(|c| c.delay.is_zero()).unwrap_or(true), + "GRANDPA authorities change at {} scheduled to happen in {:?} blocks. 
We expect\ + regular hange to have zero delay", + initial_header_hash, + scheduled_change.as_ref().map(|c| c.delay), + ); + let schedules_change = scheduled_change.is_some(); + if schedules_change { + authorities_for_verification = + Self::source_authorities_set(&source_client, *initial_header.parent_hash()).await?; + log::trace!( + target: "bridge", + "Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}", + C::NAME, + authorities_for_verification, + ); + } + + // Now let's try to guess authorities set id by verifying justification. + let mut initial_authorities_set_id = 0; + let mut min_possible_block_number = C::BlockNumber::zero(); + let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()) + .ok_or(Error::ReadInvalidAuthorities(C::NAME, authorities_for_verification))?; + loop { + log::trace!( + target: "bridge", "Trying {} GRANDPA authorities set id: {}", + C::NAME, + initial_authorities_set_id, + ); + + let is_valid_set_id = verify_justification::( + (initial_header_hash, initial_header_number), + initial_authorities_set_id, + &authorities_for_verification, + &justification, + ) + .is_ok(); + + if is_valid_set_id { + break + } + + initial_authorities_set_id += 1; + min_possible_block_number += One::one(); + if min_possible_block_number > initial_header_number { + // there can't be more authorities set changes than headers => if we have reached + // `initial_block_number` and still have not found correct value of + // `initial_authorities_set_id`, then something else is broken => fail + return Err(Error::GuessInitialAuthorities(C::NAME, initial_header_number)) + } + } + + Ok(bp_header_chain::InitializationData { + header: Box::new(initial_header), + authority_list: initial_authorities_set, + set_id: if schedules_change { + initial_authorities_set_id + 1 + } else { + initial_authorities_set_id + }, + is_halted: false, + }) + } +} + +/// BEEFY finality engine. +pub struct Beefy(PhantomData); + +#[async_trait] +impl Engine for Beefy +where + C: std::fmt::Debug, + BeefySignedCommitmentOf: std::fmt::Debug + Send + Sync, + Option>: std::fmt::Debug + Send + Sync, +{ + const ID: ConsensusEngineId = bp_beefy::BEEFY_ENGINE_ID; + type FinalityProof = BeefySignedCommitmentWithMmrLeaf; + type InitializationData = bp_beefy::InitializationData, BeefyValidatorIdOf>; + + fn is_halted_key() -> StorageKey { + bp_beefy::storage_keys::is_halted_key(C::WITH_CHAIN_BEEFY_PALLET_NAME) + } + + fn is_initialized_key() -> StorageKey { + bp_beefy::storage_keys::best_block_number_key(C::WITH_CHAIN_BEEFY_PALLET_NAME) + } + + async fn finality_proofs(client: Client) -> Result, SubstrateError> { + client.subscribe_beefy_justifications().await + } + + async fn prepare_initialization_data( + client: Client, + ) -> Result, BlockNumberOf>> { + let best_finalized_header_hash = client + .best_finalized_beefy_hash() + .await + .map_err(|err| Error::RetrieveBestFinalizedHeaderHash(C::NAME, err))?; + let best_finalized_header = client + .header_by_hash(best_finalized_header_hash) + .await + .map_err(|err| Error::RetrieveHeader(C::NAME, best_finalized_header_hash, err))?; + let current_validator_set_id: bp_beefy::ValidatorSetId = client + .storage_value( + bp_beefy::storage_keys::bridged::valdiator_set_id_storage_key( + C::AT_CHAIN_BEEFY_PALLET_NAME, + ), + Some(best_finalized_header_hash), + ) + .await + .map_err(|err| { + Error::RetrieveAuthoritiesSetId(C::NAME, best_finalized_header_hash, err) + })? 
+ .ok_or_else(|| { + Error::RetrieveAuthoritiesSetId( + C::NAME, + best_finalized_header_hash, + SubstrateError::MissingMandatoryStorageValue, + ) + })?; + let current_validators: Vec> = client + .storage_value( + bp_beefy::storage_keys::bridged::validators_storage_key( + C::AT_CHAIN_BEEFY_PALLET_NAME, + ), + Some(best_finalized_header_hash), + ) + .await + .map_err(|err| Error::RetrieveAuthorities(C::NAME, best_finalized_header_hash, err))? + .ok_or_else(|| { + Error::RetrieveAuthorities( + C::NAME, + best_finalized_header_hash, + SubstrateError::MissingMandatoryStorageValue, + ) + })?; + let next_validators: Vec> = client + .storage_value( + bp_beefy::storage_keys::bridged::next_validators_storage_key( + C::AT_CHAIN_BEEFY_PALLET_NAME, + ), + Some(best_finalized_header_hash), + ) + .await + .map_err(|err| { + Error::RetrieveNextAuthorities(C::NAME, best_finalized_header_hash, err) + })? + .ok_or_else(|| { + Error::RetrieveNextAuthorities( + C::NAME, + best_finalized_header_hash, + SubstrateError::MissingMandatoryStorageValue, + ) + })?; + Ok(bp_beefy::InitializationData { + is_halted: false, + best_beefy_block_number: *best_finalized_header.number(), + current_validator_set: (current_validator_set_id, current_validators), + next_validator_set: (current_validator_set_id.saturating_add(1), next_validators), + }) + } +} + +/// Finality proof, used by the BEEFY engine. +#[derive(Clone, Debug, Decode, Encode)] +pub struct BeefySignedCommitmentWithMmrLeaf { + /// Signed BEEFY commitment. + pub commitment: BeefySignedCommitmentOf, +} + +impl FinalityProof> + for BeefySignedCommitmentWithMmrLeaf +where + C: std::fmt::Debug, + BeefySignedCommitmentOf: std::fmt::Debug + Send + Sync, + Option>: std::fmt::Debug + Send + Sync, +{ + fn target_header_number(&self) -> BlockNumberOf { + self.commitment.commitment.block_number + } +} diff --git a/relays/lib-substrate-relay/src/finality/initialize.rs b/relays/lib-substrate-relay/src/finality/initialize.rs new file mode 100644 index 00000000000..c4f0bc1cef8 --- /dev/null +++ b/relays/lib-substrate-relay/src/finality/initialize.rs @@ -0,0 +1,121 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Initialize Substrate -> Substrate GRANDPA headers bridge. +//! +//! Initialization is a transaction that calls `initialize()` function of the +//! `pallet-bridge-grandpa` pallet. This transaction brings initial header +//! and authorities set from source to target chain. The headers sync starts +//! with this header. + +use crate::{error::Error, finality::engine::Engine}; + +use relay_substrate_client::{BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf}; +use sp_core::Bytes; +use sp_runtime::traits::Header as HeaderT; + +/// Submit headers-bridge initialization transaction. 
+pub async fn initialize, SourceChain: Chain, TargetChain: Chain, F>( + source_client: Client, + target_client: Client, + target_transactions_signer: TargetChain::AccountId, + prepare_initialize_transaction: F, +) where + F: FnOnce(TargetChain::Index, E::InitializationData) -> Result + + Send + + 'static, +{ + let result = do_initialize::( + source_client, + target_client, + target_transactions_signer, + prepare_initialize_transaction, + ) + .await; + + match result { + Ok(Some(tx_hash)) => log::info!( + target: "bridge", + "Successfully submitted {}-headers bridge initialization transaction to {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + tx_hash, + ), + Ok(None) => (), + Err(err) => log::error!( + target: "bridge", + "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + err, + ), + } +} + +/// Craft and submit initialization transaction, returning any error that may occur. +async fn do_initialize, SourceChain: Chain, TargetChain: Chain, F>( + source_client: Client, + target_client: Client, + target_transactions_signer: TargetChain::AccountId, + prepare_initialize_transaction: F, +) -> Result< + Option, + Error::Number>, +> +where + F: FnOnce(TargetChain::Index, E::InitializationData) -> Result + + Send + + 'static, +{ + let is_initialized = is_initialized::(&target_client).await?; + if is_initialized { + log::info!( + target: "bridge", + "{}-headers bridge at {} is already initialized. Skipping", + SourceChain::NAME, + TargetChain::NAME, + ); + return Ok(None) + } + + let initialization_data = E::prepare_initialization_data(source_client).await?; + log::info!( + target: "bridge", + "Prepared initialization data for {}-headers bridge at {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + initialization_data, + ); + + let initialization_tx_hash = target_client + .submit_signed_extrinsic(target_transactions_signer, move |_, transaction_nonce| { + prepare_initialize_transaction(transaction_nonce, initialization_data) + }) + .await + .map_err(|err| Error::SubmitTransaction(TargetChain::NAME, err))?; + Ok(Some(initialization_tx_hash)) +} + +/// Returns `Ok(true)` if bridge has already been initialized. +async fn is_initialized, SourceChain: Chain, TargetChain: Chain>( + target_client: &Client, +) -> Result, BlockNumberOf>> { + Ok(target_client + .raw_storage_value(E::is_initialized_key(), None) + .await + .map_err(|err| Error::RetrieveBestFinalizedHeaderHash(SourceChain::NAME, err))? + .is_some()) +} diff --git a/relays/lib-substrate-relay/src/finality_pipeline.rs b/relays/lib-substrate-relay/src/finality/mod.rs similarity index 73% rename from relays/lib-substrate-relay/src/finality_pipeline.rs rename to relays/lib-substrate-relay/src/finality/mod.rs index 3daf8d11440..1945097c0f2 100644 --- a/relays/lib-substrate-relay/src/finality_pipeline.rs +++ b/relays/lib-substrate-relay/src/finality/mod.rs @@ -18,22 +18,33 @@ //! finality proofs synchronization pipelines. 
use crate::{ - finality_source::SubstrateFinalitySource, finality_target::SubstrateFinalityTarget, + finality::{ + engine::{BeefySignedCommitmentWithMmrLeaf, Engine}, + source::{SubstrateFinalityProof, SubstrateFinalitySource}, + target::SubstrateFinalityTarget, + }, TransactionParams, }; use async_trait::async_trait; use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; use finality_relay::FinalitySyncPipeline; +use pallet_bridge_beefy::{Call as BridgeBeefyCall, Config as BridgeBeefyConfig}; use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; use relay_substrate_client::{ - transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, - ChainWithGrandpa, Client, HashOf, HeaderOf, SyncHeader, TransactionSignScheme, + transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, Client, + HashOf, HeaderOf, SyncHeader, TransactionSignScheme, }; use relay_utils::metrics::MetricsParams; use sp_core::Pair; use std::{fmt::Debug, marker::PhantomData}; +pub mod engine; +pub mod initialize; +pub mod source; +pub mod target; + /// Default limit of recent finality proofs. /// /// Finality delay of 4096 blocks is unlikely to happen in practice in @@ -44,10 +55,12 @@ pub(crate) const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; #[async_trait] pub trait SubstrateFinalitySyncPipeline: 'static + Clone + Debug + Send + Sync { /// Headers of this chain are submitted to the `TargetChain`. - type SourceChain: ChainWithGrandpa; + type SourceChain: Chain; /// Headers of the `SourceChain` are submitted to this chain. type TargetChain: Chain; + /// Finality engine. + type FinalityEngine: Engine; /// How submit finality proof call is built? type SubmitFinalityProofCallBuilder: SubmitFinalityProofCallBuilder; /// Scheme used to sign target chain transactions. @@ -76,7 +89,7 @@ impl FinalitySyncPipeline for FinalitySyncPipe type Hash = HashOf; type Number = BlockNumberOf; type Header = relay_substrate_client::SyncHeader>; - type FinalityProof = GrandpaJustification>; + type FinalityProof = SubstrateFinalityProof
<P>
; } /// Different ways of building `submit_finality_proof` calls. @@ -85,23 +98,26 @@ pub trait SubmitFinalityProofCallBuilder { /// function of bridge GRANDPA module at the target chain. fn build_submit_finality_proof_call( header: SyncHeader>, - proof: GrandpaJustification>, + proof: SubstrateFinalityProof
<P>
, ) -> CallOf; } /// Building `submit_finality_proof` call when you have direct access to the target /// chain runtime. -pub struct DirectSubmitFinalityProofCallBuilder { +pub struct DirectSubmitGrandpaFinalityProofCallBuilder { _phantom: PhantomData<(P, R, I)>, } -impl SubmitFinalityProofCallBuilder
<P>
for DirectSubmitFinalityProofCallBuilder +impl SubmitFinalityProofCallBuilder
<P>
+ for DirectSubmitGrandpaFinalityProofCallBuilder where P: SubstrateFinalitySyncPipeline, R: BridgeGrandpaConfig, I: 'static, R::BridgedChain: bp_runtime::Chain
<Header = HeaderOf<P::SourceChain>
>, CallOf: From>, + P::FinalityEngine: + Engine>>, { fn build_submit_finality_proof_call( header: SyncHeader>, @@ -115,6 +131,37 @@ where } } +/// Building `submit_commitment` call when you have direct access to the target +/// chain runtime. +pub struct DirectSubmitBeefyFinalityProofCallBuilder { + _phantom: PhantomData<(P, R, I)>, +} + +impl SubmitFinalityProofCallBuilder
<P>
+ for DirectSubmitBeefyFinalityProofCallBuilder +where + P: SubstrateFinalitySyncPipeline, + R: BridgeBeefyConfig, + I: 'static, + R::BridgedChain: bp_runtime::Chain

>, + CallOf: From>, + P::SourceChain: relay_substrate_client::ChainWithBeefy, + P::FinalityEngine: + Engine>, +{ + fn build_submit_finality_proof_call( + _header: SyncHeader>, + proof: BeefySignedCommitmentWithMmrLeaf, + ) -> CallOf { + BridgeBeefyCall::::submit_commitment { + encoded_commitment: proof.commitment.encode(), + encoded_mmr_proof: vec![], // TODO + mmr_leaf: bp_beefy::BeefyMmrLeafUnpacked::Regular(vec![]), // TODO + } + .into() + } +} + /// Macro that generates `SubmitFinalityProofCallBuilder` implementation for the case when /// you only have an access to the mocked version of target chain runtime. In this case you /// should provide "name" of the call variant for the bridge GRANDPA calls and the "name" of @@ -125,22 +172,22 @@ macro_rules! generate_mocked_submit_finality_proof_call_builder { ($pipeline:ident, $mocked_builder:ident, $bridge_grandpa:path, $submit_finality_proof:path) => { pub struct $mocked_builder; - impl $crate::finality_pipeline::SubmitFinalityProofCallBuilder<$pipeline> + impl $crate::finality::SubmitFinalityProofCallBuilder<$pipeline> for $mocked_builder { fn build_submit_finality_proof_call( header: relay_substrate_client::SyncHeader< relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_pipeline::SubstrateFinalitySyncPipeline>::SourceChain + <$pipeline as $crate::finality::SubstrateFinalitySyncPipeline>::SourceChain > >, proof: bp_header_chain::justification::GrandpaJustification< relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_pipeline::SubstrateFinalitySyncPipeline>::SourceChain + <$pipeline as $crate::finality::SubstrateFinalitySyncPipeline>::SourceChain > >, ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::finality_pipeline::SubstrateFinalitySyncPipeline>::TargetChain + <$pipeline as $crate::finality::SubstrateFinalitySyncPipeline>::TargetChain > { $bridge_grandpa($submit_finality_proof(Box::new(header.into_inner()), proof)) } diff --git a/relays/lib-substrate-relay/src/finality_source.rs b/relays/lib-substrate-relay/src/finality/source.rs similarity index 88% rename from relays/lib-substrate-relay/src/finality_source.rs rename to relays/lib-substrate-relay/src/finality/source.rs index 804d3212930..57f11a029c7 100644 --- a/relays/lib-substrate-relay/src/finality_source.rs +++ b/relays/lib-substrate-relay/src/finality/source.rs @@ -16,11 +16,10 @@ //! Default generic implementation of finality source for basic Substrate client. -use crate::finality_pipeline::{FinalitySyncPipelineAdapter, SubstrateFinalitySyncPipeline}; +use crate::finality::{engine::Engine, FinalitySyncPipelineAdapter, SubstrateFinalitySyncPipeline}; use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; -use bp_header_chain::justification::GrandpaJustification; use codec::Decode; use finality_relay::SourceClient; use futures::stream::{unfold, Stream, StreamExt}; @@ -38,13 +37,19 @@ pub type RequiredHeaderNumberRef = Arc::BlockN pub type SubstrateFinalityProofsStream
<P>
= Pin< Box< dyn Stream< - Item = GrandpaJustification< - HeaderOf<
<P as SubstrateFinalitySyncPipeline>
::SourceChain>, - >, + Item = <
<P as SubstrateFinalitySyncPipeline>
::FinalityEngine as Engine< +
<P as SubstrateFinalitySyncPipeline>
::SourceChain, + >>::FinalityProof, > + Send, >, >; +/// Finality proof, as produced by the finality engine of the given pipeline. +pub type SubstrateFinalityProof
<P>
= + <
<P as SubstrateFinalitySyncPipeline>
::FinalityEngine as Engine< +
<P as SubstrateFinalitySyncPipeline>
::SourceChain, + >>::FinalityProof; + /// Substrate node as finality source. pub struct SubstrateFinalitySource { client: Client, @@ -120,7 +125,7 @@ impl SourceClient Result< ( relay_substrate_client::SyncHeader>, - Option>>, + Option>, ), Error, > { @@ -130,9 +135,7 @@ impl SourceClient>::decode( - &mut raw_justification.as_slice(), - ) + SubstrateFinalityProof::
<P>
::decode(&mut raw_justification.as_slice()) }) .transpose() .map_err(Error::ResponseParseFailed)?; @@ -142,7 +145,7 @@ impl SourceClient Result { Ok(unfold( - self.client.clone().subscribe_justifications().await?, + P::FinalityEngine::finality_proofs(self.client.clone()).await?, move |subscription| async move { loop { let log_error = |err| { @@ -161,7 +164,7 @@ impl SourceClient>::decode( + >::FinalityProof::decode( &mut &next_justification[..], ); diff --git a/relays/lib-substrate-relay/src/finality_target.rs b/relays/lib-substrate-relay/src/finality/target.rs similarity index 88% rename from relays/lib-substrate-relay/src/finality_target.rs rename to relays/lib-substrate-relay/src/finality/target.rs index 4c581417104..2d4079c9fe1 100644 --- a/relays/lib-substrate-relay/src/finality_target.rs +++ b/relays/lib-substrate-relay/src/finality/target.rs @@ -19,19 +19,19 @@ //! with chain. use crate::{ - finality_pipeline::{ - FinalitySyncPipelineAdapter, SubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, + finality::{ + engine::Engine, source::SubstrateFinalityProof, FinalitySyncPipelineAdapter, + SubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, }, TransactionParams, }; use async_trait::async_trait; -use bp_header_chain::{justification::GrandpaJustification, storage_keys::is_halted_key}; use codec::Encode; use finality_relay::TargetClient; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, Chain, ChainWithGrandpa, Client, Error, HeaderIdOf, HeaderOf, - SignParam, SyncHeader, TransactionEra, TransactionSignScheme, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SignParam, + SyncHeader, TransactionEra, TransactionSignScheme, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::{Bytes, Pair}; @@ -53,10 +53,7 @@ impl SubstrateFinalityTarget
<P>
{ /// Ensure that the GRANDPA pallet at target chain is active. pub async fn ensure_pallet_active(&self) -> Result<(), Error> { - let is_halted = self - .client - .storage_value(is_halted_key(P::SourceChain::WITH_CHAIN_GRANDPA_PALLET_NAME), None) - .await?; + let is_halted = self.client.storage_value(P::FinalityEngine::is_halted_key(), None).await?; if is_halted.unwrap_or(false) { Err(Error::BridgePalletIsHalted) } else { @@ -109,7 +106,7 @@ where async fn submit_finality_proof( &self, header: SyncHeader>, - proof: GrandpaJustification>, + proof: SubstrateFinalityProof
<P>
, ) -> Result<(), Error> { let genesis_hash = *self.client.genesis_hash(); let transaction_params = self.transaction_params.clone(); diff --git a/relays/lib-substrate-relay/src/headers_initialize.rs b/relays/lib-substrate-relay/src/headers_initialize.rs deleted file mode 100644 index 0e1371c53c8..00000000000 --- a/relays/lib-substrate-relay/src/headers_initialize.rs +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Initialize Substrate -> Substrate headers bridge. -//! -//! Initialization is a transaction that calls `initialize()` function of the -//! `pallet-bridge-grandpa` pallet. This transaction brings initial header -//! and authorities set from source to target chain. The headers sync starts -//! with this header. - -use crate::error::Error; - -use bp_header_chain::{ - find_grandpa_authorities_scheduled_change, - justification::{verify_justification, GrandpaJustification}, - InitializationData, -}; -use codec::Decode; -use finality_grandpa::voter_set::VoterSet; -use num_traits::{One, Zero}; -use relay_substrate_client::{ - BlockNumberOf, Chain, ChainWithGrandpa, Client, Error as SubstrateError, HashOf, -}; -use sp_core::Bytes; -use sp_finality_grandpa::AuthorityList as GrandpaAuthoritiesSet; -use sp_runtime::traits::Header as HeaderT; - -/// Submit headers-bridge initialization transaction. -pub async fn initialize( - source_client: Client, - target_client: Client, - target_transactions_signer: TargetChain::AccountId, - prepare_initialize_transaction: impl FnOnce( - TargetChain::Index, - InitializationData, - ) -> Result - + Send - + 'static, -) { - let result = do_initialize( - source_client, - target_client, - target_transactions_signer, - prepare_initialize_transaction, - ) - .await; - - match result { - Ok(Some(tx_hash)) => log::info!( - target: "bridge", - "Successfully submitted {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - tx_hash, - ), - Ok(None) => (), - Err(err) => log::error!( - target: "bridge", - "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - err, - ), - } -} - -/// Craft and submit initialization transaction, returning any error that may occur. -async fn do_initialize( - source_client: Client, - target_client: Client, - target_transactions_signer: TargetChain::AccountId, - prepare_initialize_transaction: impl FnOnce( - TargetChain::Index, - InitializationData, - ) -> Result - + Send - + 'static, -) -> Result< - Option, - Error::Number>, -> { - let is_initialized = is_initialized::(&target_client).await?; - if is_initialized { - log::info!( - target: "bridge", - "{}-headers bridge at {} is already initialized. 
Skipping", - SourceChain::NAME, - TargetChain::NAME, - ); - return Ok(None) - } - - let initialization_data = prepare_initialization_data(source_client).await?; - log::info!( - target: "bridge", - "Prepared initialization data for {}-headers bridge at {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - initialization_data, - ); - - let initialization_tx_hash = target_client - .submit_signed_extrinsic(target_transactions_signer, move |_, transaction_nonce| { - prepare_initialize_transaction(transaction_nonce, initialization_data) - }) - .await - .map_err(|err| Error::SubmitTransaction(TargetChain::NAME, err))?; - Ok(Some(initialization_tx_hash)) -} - -/// Returns `Ok(true)` if bridge has already been initialized. -async fn is_initialized( - target_client: &Client, -) -> Result, BlockNumberOf>> { - Ok(target_client - .raw_storage_value( - bp_header_chain::storage_keys::best_finalized_hash_key( - SourceChain::WITH_CHAIN_GRANDPA_PALLET_NAME, - ), - None, - ) - .await - .map_err(|err| Error::RetrieveBestFinalizedHeaderHash(SourceChain::NAME, err))? - .is_some()) -} - -/// Prepare initialization data for the GRANDPA verifier pallet. -async fn prepare_initialization_data( - source_client: Client, -) -> Result< - InitializationData, - Error::Number>, -> { - // In ideal world we just need to get best finalized header and then to read GRANDPA authorities - // set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at this header. - // - // But now there are problems with this approach - `CurrentSetId` may return invalid value. So - // here we're waiting for the next justification, read the authorities set and then try to - // figure out the set id with bruteforce. - let justifications = source_client - .subscribe_justifications() - .await - .map_err(|err| Error::Subscribe(SourceChain::NAME, err))?; - // Read next justification - the header that it finalizes will be used as initial header. - let justification = justifications - .next() - .await - .map_err(|e| Error::ReadJustification(SourceChain::NAME, e)) - .and_then(|justification| { - justification.ok_or(Error::ReadJustificationStreamEnded(SourceChain::NAME)) - })?; - - // Read initial header. - let justification: GrandpaJustification = - Decode::decode(&mut &justification.0[..]) - .map_err(|err| Error::DecodeJustification(SourceChain::NAME, err))?; - - let (initial_header_hash, initial_header_number) = - (justification.commit.target_hash, justification.commit.target_number); - - let initial_header = source_header(&source_client, initial_header_hash).await?; - log::trace!(target: "bridge", "Selected {} initial header: {}/{}", - SourceChain::NAME, - initial_header_number, - initial_header_hash, - ); - - // Read GRANDPA authorities set at initial header. - let initial_authorities_set = - source_authorities_set(&source_client, initial_header_hash).await?; - log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}", - SourceChain::NAME, - initial_authorities_set, - ); - - // If initial header changes the GRANDPA authorities set, then we need previous authorities - // to verify justification. - let mut authorities_for_verification = initial_authorities_set.clone(); - let scheduled_change = find_grandpa_authorities_scheduled_change(&initial_header); - assert!( - scheduled_change.as_ref().map(|c| c.delay.is_zero()).unwrap_or(true), - "GRANDPA authorities change at {} scheduled to happen in {:?} blocks. 
We expect\ - regular hange to have zero delay", - initial_header_hash, - scheduled_change.as_ref().map(|c| c.delay), - ); - let schedules_change = scheduled_change.is_some(); - if schedules_change { - authorities_for_verification = - source_authorities_set(&source_client, *initial_header.parent_hash()).await?; - log::trace!( - target: "bridge", - "Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}", - SourceChain::NAME, - authorities_for_verification, - ); - } - - // Now let's try to guess authorities set id by verifying justification. - let mut initial_authorities_set_id = 0; - let mut min_possible_block_number = SourceChain::BlockNumber::zero(); - let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()) - .ok_or(Error::ReadInvalidAuthorities(SourceChain::NAME, authorities_for_verification))?; - loop { - log::trace!( - target: "bridge", "Trying {} GRANDPA authorities set id: {}", - SourceChain::NAME, - initial_authorities_set_id, - ); - - let is_valid_set_id = verify_justification::( - (initial_header_hash, initial_header_number), - initial_authorities_set_id, - &authorities_for_verification, - &justification, - ) - .is_ok(); - - if is_valid_set_id { - break - } - - initial_authorities_set_id += 1; - min_possible_block_number += One::one(); - if min_possible_block_number > initial_header_number { - // there can't be more authorities set changes than headers => if we have reached - // `initial_block_number` and still have not found correct value of - // `initial_authorities_set_id`, then something else is broken => fail - return Err(Error::GuessInitialAuthorities(SourceChain::NAME, initial_header_number)) - } - } - - Ok(InitializationData { - header: Box::new(initial_header), - authority_list: initial_authorities_set, - set_id: if schedules_change { - initial_authorities_set_id + 1 - } else { - initial_authorities_set_id - }, - is_halted: false, - }) -} - -/// Read header by hash from the source client. -async fn source_header( - source_client: &Client, - header_hash: SourceChain::Hash, -) -> Result::Number>> -{ - source_client - .header_by_hash(header_hash) - .await - .map_err(|err| Error::RetrieveHeader(SourceChain::NAME, header_hash, err)) -} - -/// Read GRANDPA authorities set at given header. 
-async fn source_authorities_set( - source_client: &Client, - header_hash: SourceChain::Hash, -) -> Result::Number>> -{ - let raw_authorities_set = source_client - .grandpa_authorities_set(header_hash) - .await - .map_err(|err| Error::RetrieveAuthorities(SourceChain::NAME, header_hash, err))?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) - .map_err(|err| Error::DecodeAuthorities(SourceChain::NAME, header_hash, err)) -} diff --git a/relays/lib-substrate-relay/src/lib.rs b/relays/lib-substrate-relay/src/lib.rs index 27d91147c2d..ff2d80b39df 100644 --- a/relays/lib-substrate-relay/src/lib.rs +++ b/relays/lib-substrate-relay/src/lib.rs @@ -22,11 +22,8 @@ use std::time::Duration; pub mod conversion_rate_update; pub mod error; +pub mod finality; pub mod finality_guards; -pub mod finality_pipeline; -pub mod finality_source; -pub mod finality_target; -pub mod headers_initialize; pub mod helpers; pub mod messages_lane; pub mod messages_metrics; diff --git a/relays/lib-substrate-relay/src/on_demand_headers.rs b/relays/lib-substrate-relay/src/on_demand_headers.rs index c1401a28a6d..915e04f0877 100644 --- a/relays/lib-substrate-relay/src/on_demand_headers.rs +++ b/relays/lib-substrate-relay/src/on_demand_headers.rs @@ -30,9 +30,11 @@ use relay_utils::{ }; use crate::{ - finality_pipeline::{SubstrateFinalitySyncPipeline, RECENT_FINALITY_PROOFS_LIMIT}, - finality_source::{RequiredHeaderNumberRef, SubstrateFinalitySource}, - finality_target::SubstrateFinalityTarget, + finality::{ + source::{RequiredHeaderNumberRef, SubstrateFinalitySource}, + target::SubstrateFinalityTarget, + SubstrateFinalitySyncPipeline, RECENT_FINALITY_PROOFS_LIMIT, + }, TransactionParams, STALL_TIMEOUT, };
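
Note on the refactoring above: the GRANDPA-only `finality_pipeline`/`finality_source`/`finality_target`/`headers_initialize` modules are folded into a single `finality` module that is generic over a finality `Engine`. Everything that differs between GRANDPA and BEEFY (finality proof type, justifications RPC subscription, storage keys used for the "is halted"/"is initialized" checks) sits behind that trait. The sketch below is a minimal, self-contained illustration of the pattern only; it does not use the real crate types, and all names in it (the simplified `Chain` and `Engine` traits, `describe_pipeline`, the storage-key strings, the `Millau` stand-in) are invented for the example.

// Minimal sketch of an engine-agnostic finality pipeline, assuming simplified types.
pub trait Chain {
    const NAME: &'static str;
    type BlockNumber: Copy + std::fmt::Debug;
}

/// Simplified stand-in for the `Engine` trait from `finality/engine.rs`.
pub trait Engine<C: Chain> {
    /// Finality proof type produced by this engine.
    type FinalityProof: std::fmt::Debug;
    /// RPC subscription method used to stream justifications.
    const JUSTIFICATIONS_SUBSCRIPTION: &'static str;
    /// Storage entry (a plain string here, not a real hashed key) that exists once the
    /// bridge pallet is initialized at the target chain.
    fn is_initialized_key(with_chain_pallet: &str) -> String;
}

pub struct Grandpa;
pub struct Beefy;

#[derive(Debug)]
pub struct GrandpaJustification<Number>(pub Number);
#[derive(Debug)]
pub struct BeefyCommitment<Number>(pub Number);

impl<C: Chain> Engine<C> for Grandpa {
    type FinalityProof = GrandpaJustification<C::BlockNumber>;
    const JUSTIFICATIONS_SUBSCRIPTION: &'static str = "grandpa_subscribeJustifications";
    fn is_initialized_key(with_chain_pallet: &str) -> String {
        format!("{}::BestFinalized", with_chain_pallet)
    }
}

impl<C: Chain> Engine<C> for Beefy {
    type FinalityProof = BeefyCommitment<C::BlockNumber>;
    const JUSTIFICATIONS_SUBSCRIPTION: &'static str = "beefy_subscribeJustifications";
    fn is_initialized_key(with_chain_pallet: &str) -> String {
        format!("{}::BestBlockNumber", with_chain_pallet)
    }
}

/// Engine-agnostic relay code: it only relies on the `Engine` trait surface, which is
/// the same idea as the refactored `finality::run`/`finality::initialize` helpers.
fn describe_pipeline<C: Chain, E: Engine<C>>(with_chain_pallet: &str) {
    println!(
        "{}: subscribe via `{}`, check initialization at `{}`",
        C::NAME,
        E::JUSTIFICATIONS_SUBSCRIPTION,
        E::is_initialized_key(with_chain_pallet),
    );
}

struct Millau;

impl Chain for Millau {
    const NAME: &'static str = "Millau";
    type BlockNumber = u64;
}

fn main() {
    // The same generic code drives both a GRANDPA and a BEEFY pipeline.
    describe_pipeline::<Millau, Grandpa>("BridgeMillauGrandpa");
    describe_pipeline::<Millau, Beefy>("BridgeMillauBeefy");
}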
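
The GRANDPA `prepare_initialization_data` above (both the new `engine.rs` copy and the deleted `headers_initialize.rs` one) guesses the authorities set id by brute force: starting from id 0, it stops at the first id for which the just-received justification verifies, and bails out once more ids have been tried than there are headers. A simplified, synchronous sketch of that loop follows; the `verify` closure is a placeholder for `bp_header_chain::justification::verify_justification`, and the function name is invented for the example.

// Try authority set ids starting from 0 until the justification verifies.
fn guess_authorities_set_id<F>(initial_header_number: u64, verify: F) -> Option<u64>
where
    F: Fn(u64) -> bool, // returns true if the justification is valid for this set id
{
    let mut set_id = 0u64;
    let mut min_possible_block_number = 0u64;
    loop {
        if verify(set_id) {
            return Some(set_id);
        }
        // There can't be more authority-set changes than headers, so once we have tried
        // more ids than the initial header number, something else must be broken.
        set_id += 1;
        min_possible_block_number += 1;
        if min_possible_block_number > initial_header_number {
            return None;
        }
    }
}

fn main() {
    // Pretend the justification only verifies with set id 3.
    let found = guess_authorities_set_id(10, |set_id| set_id == 3);
    assert_eq!(found, Some(3));
    println!("guessed set id: {:?}", found);
}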