
Async backing client ready #1302

Merged · 13 commits · Aug 6, 2024
4 changes: 4 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -227,6 +227,7 @@ cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/polkadot-sdk",
cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.9.0", default-features = false }
cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.9.0", default-features = false }
cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.9.0", default-features = false }
cumulus-primitives-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.9.0", default-features = false }
cumulus-primitives-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.9.0", default-features = false }
cumulus-primitives-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.9.0", default-features = false }
cumulus-primitives-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.9.0", default-features = false }
1 change: 1 addition & 0 deletions bin/collator/Cargo.toml
@@ -116,6 +116,7 @@ cumulus-client-consensus-proposer = { workspace = true }
cumulus-client-consensus-relay-chain = { workspace = true }
cumulus-client-network = { workspace = true }
cumulus-client-service = { workspace = true }
cumulus-primitives-aura = { workspace = true }
cumulus-primitives-core = { workspace = true, features = ["std"] }
cumulus-primitives-parachain-inherent = { workspace = true }
cumulus-relay-chain-inprocess-interface = { workspace = true }
76 changes: 50 additions & 26 deletions bin/collator/src/parachain/service.rs
@@ -20,21 +20,24 @@

use astar_primitives::*;
use cumulus_client_cli::CollatorOptions;
use cumulus_client_consensus_aura::collators::basic as basic_aura;
use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams};
use cumulus_client_consensus_common::ParachainBlockImport;
use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
use cumulus_client_service::{
prepare_node_config, start_relay_chain_tasks, BuildNetworkParams, DARecoveryProfile,
StartRelayChainTasksParams,
};
use cumulus_primitives_core::ParaId;
use cumulus_primitives_aura::AuraUnincludedSegmentApi;
use cumulus_primitives_core::{
relay_chain::{CollatorPair, ValidationCode},
ParaId,
};
use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain;
use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult};
use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node_with_rpc;
use fc_consensus::FrontierBlockImport;
use fc_rpc_core::types::{FeeHistoryCache, FilterPool};
use futures::StreamExt;
use polkadot_service::CollatorPair;
use sc_client_api::BlockchainEvents;
use sc_consensus::{import_queue::BasicQueue, ImportQueue};
use sc_executor::NativeElseWasmExecutor;
@@ -351,6 +354,7 @@ where
) -> sc_consensus::DefaultImportQueue<Block>,
SC: FnOnce(
Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
Arc<TFullBackend<Block>>,
ParachainBlockImport<
Block,
FrontierBlockImport<
@@ -562,6 +566,7 @@ where
if is_authority {
start_consensus(
client.clone(),
backend,
parachain_block_import,
prometheus_registry.as_ref(),
telemetry.map(|t| t.handle()),
@@ -654,6 +659,7 @@ where
) -> sc_consensus::DefaultImportQueue<Block>,
SC: FnOnce(
Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
Arc<TFullBackend<Block>>,
ParachainBlockImport<
Block,
FrontierBlockImport<
@@ -897,6 +903,7 @@ where
if is_authority {
start_consensus(
client.clone(),
backend,
parachain_block_import,
prometheus_registry.as_ref(),
telemetry.map(|t| t.handle()),
@@ -1026,6 +1033,7 @@ where
sc_client_api::StateBackend<BlakeTwo256>,
Executor: sc_executor::NativeExecutionDispatch + 'static,
{
let cidp_client = client.clone();
let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)
.expect("AuraApi slot_duration failed!");

@@ -1038,16 +1046,21 @@
>(
client,
block_import,
move |_, _| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);

Ok((slot, timestamp))
move |parent_hash, _| {
let cidp_client = cidp_client.clone();
async move {
let slot_duration =
sc_consensus_aura::standalone::slot_duration_at(&*cidp_client, parent_hash)?;
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);

Ok((slot, timestamp))
}
},
slot_duration,
&task_manager.spawn_essential_handle(),
@@ -1059,6 +1072,7 @@ where
/// Start collating with the `shell` runtime while waiting for an upgrade to an Aura compatible runtime.
fn start_aura_consensus_fallback<RuntimeApi, Executor>(
client: Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
backend: Arc<TFullBackend<Block>>,
parachain_block_import: ParachainBlockImport<
Block,
FrontierBlockImport<
@@ -1097,6 +1111,7 @@ where
+ sp_block_builder::BlockBuilder<Block>
+ fp_rpc::EthereumRuntimeRPCApi<Block>
+ AuraApi<Block, AuraId>
+ AuraUnincludedSegmentApi<Block>
+ cumulus_primitives_core::CollectCollationInfo<Block>,
sc_client_api::StateBackendFor<TFullBackend<Block>, Block>:
sc_client_api::StateBackend<BlakeTwo256>,
@@ -1164,9 +1179,6 @@ where
}

// Move to Aura consensus.
let slot_duration =
cumulus_client_consensus_aura::slot_duration(&*client).expect("aura is present; qed");

let announce_block = {
let sync_service = sync_oracle.clone();
Arc::new(move |hash, data| sync_service.announce_block(hash, data))
@@ -1179,23 +1191,28 @@ where
client.clone(),
);

basic_aura::run::<Block, AuraPair, _, _, _, _, _, _, _>(basic_aura::Params {
aura::run::<Block, AuraPair, _, _, _, _, _, _, _, _, _>(AuraParams {
create_inherent_data_providers: move |_, ()| async move { Ok(()) },
block_import: parachain_block_import.clone(),
para_client: client.clone(),
para_backend: backend,
relay_client: relay_chain_interface.clone(),
code_hash_provider: move |block_hash| {
client
.code_at(block_hash)
.ok()
.map(|c| ValidationCode::from(c).hash())
},
sync_oracle: sync_oracle.clone(),
keystore,
collator_key,
para_id,
overseer_handle,
slot_duration,
relay_chain_slot_duration: Duration::from_secs(6),
proposer: cumulus_client_consensus_proposer::Proposer::new(proposer_factory),
collator_service,
// We got around 500ms for proposing
authoring_duration: Duration::from_millis(500),
collation_request_receiver: Some(request_stream),
authoring_duration: Duration::from_millis(1500),
Member:

Thinking about it now - shouldn't this still remain at 500?
At least until we turn on the async backing feature for some runtime?

E.g. we deploy this new client on the existing network now.
All collators still have half a second to propose the block, don't they?

Contributor Author:

The block limit is defined on the runtime side by the weight limit. The duration here is the client-side time available to build the block; if the block isn't built within 1.5 s, authoring times out (hence the hardware spec check). Normally it needs to match the runtime block limit, but this is a transitory period until async backing is enabled. We need this released so that, when the time comes, all clients already support async backing and we can simply enable it.

Member:

Right, the weight limit would be the limiter of the block size, assuming that the collator hardware is at least as powerful as the benchmark machine.

Setting this value to 1500 now means that collators allow themselves up to 1500 ms to produce a block worth 500 ms of weight (hence the addition of the HW spec check, as you said).

I was about to suggest making this a CLI param, but I assume that would defeat the purpose of avoiding a client restart 🙂.

I think it's all clear now. 👍

Contributor Author:

The client is still required to build the block within 0.5 s; otherwise it misses the 2 s build-and-gossip timeframe. This doesn't mean collators have 1.5 s to build the block. It's rather a cut-off so the client doesn't keep building once the timeout is reached; after that point, it's pointless to continue.
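To make the timing discussed above concrete, here is a minimal illustrative sketch (not code from this PR; the constant names and the 2 s build-and-gossip window are assumptions taken from the comments above):

```rust
use std::time::Duration;

// Values from the discussion above: blocks are weighed for ~500 ms of
// execution, the collator stops building at 1500 ms, and the whole
// build-and-gossip window is about 2 s.
const RUNTIME_WEIGHT_TARGET: Duration = Duration::from_millis(500);
const AUTHORING_HARD_CUTOFF: Duration = Duration::from_millis(1500);
const BUILD_AND_GOSSIP_WINDOW: Duration = Duration::from_secs(2);

/// Gossip time left after building, or `None` once the hard cut-off is
/// hit and the collator abandons the block as pointless to finish.
fn gossip_time_left(build_time: Duration) -> Option<Duration> {
    if build_time > AUTHORING_HARD_CUTOFF {
        return None;
    }
    BUILD_AND_GOSSIP_WINDOW.checked_sub(build_time)
}

fn main() {
    // A block built within the 500 ms weight target leaves ~1.5 s to gossip.
    assert_eq!(
        gossip_time_left(RUNTIME_WEIGHT_TARGET),
        Some(Duration::from_millis(1500))
    );
    // Past the hard cut-off, authoring is abandoned.
    assert_eq!(gossip_time_left(Duration::from_millis(1600)), None);
}
```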

reinitialize: true,
})
.await
});
@@ -1208,6 +1225,7 @@ where

fn start_aura_consensus<RuntimeApi, Executor>(
client: Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
backend: Arc<TFullBackend<Block>>,
parachain_block_import: ParachainBlockImport<
Block,
FrontierBlockImport<
@@ -1246,6 +1264,7 @@ where
+ sp_block_builder::BlockBuilder<Block>
+ fp_rpc::EthereumRuntimeRPCApi<Block>
+ AuraApi<Block, AuraId>
+ AuraUnincludedSegmentApi<Block>
+ cumulus_primitives_core::CollectCollationInfo<Block>,
sc_client_api::StateBackendFor<TFullBackend<Block>, Block>:
sc_client_api::StateBackend<BlakeTwo256>,
@@ -1280,23 +1299,28 @@ where
client.clone(),
);

let fut = basic_aura::run::<Block, AuraPair, _, _, _, _, _, _, _>(basic_aura::Params {
let fut = aura::run::<Block, AuraPair, _, _, _, _, _, _, _, _, _>(AuraParams {
create_inherent_data_providers: move |_, ()| async move { Ok(()) },
block_import: parachain_block_import.clone(),
para_client: client.clone(),
para_backend: backend,
relay_client: relay_chain_interface.clone(),
code_hash_provider: move |block_hash| {
client
.code_at(block_hash)
.ok()
.map(|c| ValidationCode::from(c).hash())
},
sync_oracle: sync_oracle.clone(),
keystore,
collator_key,
para_id,
overseer_handle,
slot_duration: cumulus_client_consensus_aura::slot_duration(&*client)?,
relay_chain_slot_duration: Duration::from_secs(6),
proposer: cumulus_client_consensus_proposer::Proposer::new(proposer_factory),
collator_service,
// We got around 500ms for proposing
authoring_duration: Duration::from_millis(500),
collation_request_receiver: None,
authoring_duration: Duration::from_millis(1500),
reinitialize: false,
});

task_manager
4 changes: 3 additions & 1 deletion runtime/astar/Cargo.toml
@@ -73,9 +73,10 @@ pallet-xcm-benchmarks = { workspace = true, optional = true }
# cumulus dependencies
cumulus-pallet-aura-ext = { workspace = true }
cumulus-pallet-dmp-queue = { workspace = true }
cumulus-pallet-parachain-system = { workspace = true }
cumulus-pallet-parachain-system = { workspace = true, features = ["parameterized-consensus-hook"] }
cumulus-pallet-xcm = { workspace = true }
cumulus-pallet-xcmp-queue = { workspace = true }
cumulus-primitives-aura = { workspace = true }
cumulus-primitives-core = { workspace = true }
cumulus-primitives-timestamp = { workspace = true }
cumulus-primitives-utility = { workspace = true }
@@ -203,6 +204,7 @@ std = [
"polkadot-parachain/std",
"polkadot-primitives/std",
"polkadot-runtime-common/std",
"cumulus-primitives-aura/std",
"cumulus-primitives-core/std",
"cumulus-primitives-utility/std",
"cumulus-primitives-timestamp/std",
62 changes: 30 additions & 32 deletions runtime/astar/src/lib.rs
@@ -22,7 +22,6 @@
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"]

use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases;
use cumulus_primitives_core::AggregateMessageOrigin;
use frame_support::{
construct_runtime,
@@ -131,11 +130,21 @@ pub const fn contracts_deposit(items: u32, bytes: u32) -> Balance {

/// Change this to adjust the block time.
pub const MILLISECS_PER_BLOCK: u64 = 12000;
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
// Time is measured by number of blocks.
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;

/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the
/// relay chain.
pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1;
/// How many parachain blocks are processed by the relay chain per parent. Limits the number of
/// blocks authored per slot.
pub const BLOCK_PROCESSING_VELOCITY: u32 = 1;
/// Relay chain slot duration, in milliseconds.
pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
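For context only (not part of this diff): once async backing is actually turned on, these values would be expected to change along the lines of the standard async backing migration. A hypothetical sketch:

```rust
// Hypothetical post-enablement values (assumptions following the common
// async backing migration pattern; NOT what this PR sets):
pub const MILLISECS_PER_BLOCK: u64 = 6000; // halved block time
pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; // allow a 3-block unincluded segment
pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; // still one parachain block per relay parent
```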

// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
@@ -461,22 +470,24 @@ impl cumulus_pallet_parachain_system::Config for Runtime {
type ReservedDmpWeight = ReservedDmpWeight;
type XcmpMessageHandler = XcmpQueue;
type ReservedXcmpWeight = ReservedXcmpWeight;
type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases;
type CheckAssociatedRelayNumber = cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases;
type ConsensusHook = ConsensusHook;
type WeightInfo = cumulus_pallet_parachain_system::weights::SubstrateWeight<Runtime>;
}

impl parachain_info::Config for Runtime {}
type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
Runtime,
RELAY_CHAIN_SLOT_DURATION_MILLIS,
BLOCK_PROCESSING_VELOCITY,
UNINCLUDED_SEGMENT_CAPACITY,
>;

parameter_types! {
pub const MaxAuthorities: u32 = 250;
}
impl parachain_info::Config for Runtime {}

impl pallet_aura::Config for Runtime {
type AuthorityId = AuraId;
type DisabledValidators = ();
type MaxAuthorities = MaxAuthorities;
type MaxAuthorities = ConstU32<250>;
// Should only be enabled (`true`) once async backing is enabled;
// otherwise keep it set to `false`.
type AllowMultipleBlocksPerSlot = ConstBool<false>;
}

@@ -1387,14 +1398,23 @@ impl_runtime_apis! {

impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
fn slot_duration() -> sp_consensus_aura::SlotDuration {
sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION)
}

fn authorities() -> Vec<AuraId> {
Aura::authorities().into_inner()
}
}

impl cumulus_primitives_aura::AuraUnincludedSegmentApi<Block> for Runtime {
fn can_build_upon(
included_hash: <Block as BlockT>::Hash,
slot: cumulus_primitives_aura::Slot,
) -> bool {
ConsensusHook::can_build_upon(included_hash, slot)
}
}
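A hedged sketch of how a collator-side caller might consult this new runtime API before authoring; the helper, its generics, and the parameter names are illustrative assumptions, not code from this PR:

```rust
use sp_api::ProvideRuntimeApi;
use sp_runtime::traits::Block as BlockT;

/// Ask the runtime, at the candidate parent block, whether the unincluded
/// segment still has room for a block authored at `slot`.
fn should_author<Block, Client>(
    client: &Client,
    parent_hash: Block::Hash,
    included_hash: Block::Hash,
    slot: cumulus_primitives_aura::Slot,
) -> bool
where
    Block: BlockT,
    Client: ProvideRuntimeApi<Block>,
    Client::Api: cumulus_primitives_aura::AuraUnincludedSegmentApi<Block>,
{
    client
        .runtime_api()
        .can_build_upon(parent_hash, included_hash, slot)
        // Treat a runtime API error as "do not author".
        .unwrap_or(false)
}
```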

impl sp_block_builder::BlockBuilder<Block> for Runtime {
fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
Executive::apply_extrinsic(extrinsic)
@@ -2185,29 +2205,7 @@ impl_runtime_apis! {
}
}

struct CheckInherents;

impl cumulus_pallet_parachain_system::CheckInherents<Block> for CheckInherents {
fn check_inherents(
block: &Block,
relay_state_proof: &cumulus_pallet_parachain_system::RelayChainStateProof,
) -> sp_inherents::CheckInherentsResult {
let relay_chain_slot = relay_state_proof
.read_slot()
.expect("Could not read the relay chain slot from the proof");
let inherent_data =
cumulus_primitives_timestamp::InherentDataProvider::from_relay_chain_slot_and_duration(
relay_chain_slot,
sp_std::time::Duration::from_secs(6),
)
.create_inherent_data()
.expect("Could not create the timestamp inherent data");
inherent_data.check_extrinsics(block)
}
}

cumulus_pallet_parachain_system::register_validate_block! {
Runtime = Runtime,
BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::<Runtime, Executive>,
CheckInherents = CheckInherents,
}