From cae06bc3fdd3fb51f8056dadfde8de0bde0419bf Mon Sep 17 00:00:00 2001
From: girazoki
Date: Thu, 14 Nov 2024 14:38:42 +0100
Subject: [PATCH] add flume for messages

---
 Cargo.lock                                         |   1 +
 .../node/tanssi-relay-service/Cargo.toml           |   1 +
 .../node/tanssi-relay-service/src/dev_rpcs.rs      |  81 ++++
 .../tanssi-relay-service/src/dev_service.rs        | 377 ++++++++++++------
 .../node/tanssi-relay-service/src/lib.rs           |   2 +
 5 files changed, 329 insertions(+), 133 deletions(-)
 create mode 100644 solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs

diff --git a/Cargo.lock b/Cargo.lock
index d3c4ca88b..1d9a65edf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17483,6 +17483,7 @@ dependencies = [
  "dancelight-runtime-constants",
  "dp-container-chain-genesis-data",
  "env_logger 0.11.3",
+ "flume 0.10.14",
  "frame-benchmarking",
  "frame-benchmarking-cli",
  "frame-support",
diff --git a/solo-chains/node/tanssi-relay-service/Cargo.toml b/solo-chains/node/tanssi-relay-service/Cargo.toml
index 96d3c81a8..76da71989 100644
--- a/solo-chains/node/tanssi-relay-service/Cargo.toml
+++ b/solo-chains/node/tanssi-relay-service/Cargo.toml
@@ -82,6 +82,7 @@ async-io = { workspace = true }
 async-trait = { workspace = true }
 bitvec = { workspace = true, optional = true }
 codec = { workspace = true }
+flume = { workspace = true }
 futures = { workspace = true }
 gum = { workspace = true }
 hex-literal = { workspace = true }
diff --git a/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs b/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs
new file mode 100644
index 000000000..5ccd24e5a
--- /dev/null
+++ b/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs
@@ -0,0 +1,81 @@
+// Copyright (C) Moondance Labs Ltd.
+// This file is part of Tanssi.
+
+// Tanssi is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Tanssi is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
+
+//! Development Polkadot service. Adapted from the `polkadot_service` crate,
+//! with unnecessary components removed for the dev node.
+
+use codec::{Decode, Encode};
+use jsonrpsee::{
+    core::RpcResult,
+    proc_macros::rpc,
+    types::{
+        error::{INTERNAL_ERROR_CODE, INTERNAL_ERROR_MSG},
+        ErrorObjectOwned,
+    },
+};
+
+/// This RPC interface is used to provide methods in dev mode only
+#[rpc(server)]
+#[jsonrpsee::core::async_trait]
+pub trait DevApi {
+    /// Indicate the mock parachain candidate insertion to be active
+    #[method(name = "mock_enableParaInherentCandidate")]
+    async fn enable_para_inherent_candidate(&self) -> RpcResult<()>;
+
+    /// Indicate the mock parachain candidate insertion to be disabled
+    #[method(name = "mock_disableParaInherentCandidate")]
+    async fn disable_para_inherent_candidate(&self) -> RpcResult<()>;
+}
+
+pub struct DevRpc {
+    pub mock_para_inherent_channel: flume::Sender<Vec<u8>>,
+}
+
+#[jsonrpsee::core::async_trait]
+impl DevApiServer for DevRpc {
+    async fn enable_para_inherent_candidate(&self) -> RpcResult<()> {
+        let mock_para_inherent_channel = self.mock_para_inherent_channel.clone();
+        // Push the message to the shared channel where it will be queued up
+        // to be injected into an upcoming block.
+        mock_para_inherent_channel
+            .send_async(true.encode())
+            .await
+            .map_err(|err| internal_err(err.to_string()))?;
+
+        Ok(())
+    }
+
+    async fn disable_para_inherent_candidate(&self) -> RpcResult<()> {
+        let mock_para_inherent_channel = self.mock_para_inherent_channel.clone();
+        // Push the message to the shared channel where it will be queued up
+        // to be injected into an upcoming block.
+        mock_para_inherent_channel
+            .send_async(false.encode())
+            .await
+            .map_err(|err| internal_err(err.to_string()))?;
+
+        Ok(())
+    }
+}
+
+// This bit cribbed from frontier.
+pub fn internal_err<T: ToString>(message: T) -> ErrorObjectOwned {
+    ErrorObjectOwned::owned(
+        INTERNAL_ERROR_CODE,
+        INTERNAL_ERROR_MSG,
+        Some(message.to_string()),
+    )
+}
diff --git a/solo-chains/node/tanssi-relay-service/src/dev_service.rs b/solo-chains/node/tanssi-relay-service/src/dev_service.rs
index f093eef23..c2dc8ac26 100644
--- a/solo-chains/node/tanssi-relay-service/src/dev_service.rs
+++ b/solo-chains/node/tanssi-relay-service/src/dev_service.rs
@@ -30,6 +30,8 @@
 //! 10. If amount of time passed between two block is less than slot duration, we emulate passing of time babe block import and runtime
 //! by incrementing timestamp by slot duration.
+use crate::dev_rpcs::DevRpc; + use { async_io::Timer, babe::{BabeBlockImport, BabeLink}, @@ -42,7 +44,7 @@ use { polkadot_core_primitives::{AccountId, Balance, Block, Hash, Nonce}, polkadot_node_core_parachains_inherent::Error as InherentError, polkadot_overseer::Handle, - polkadot_primitives::{InherentData as ParachainsInherentData, runtime_api::ParachainHost}, + polkadot_primitives::{runtime_api::ParachainHost, InherentData as ParachainsInherentData}, polkadot_rpc::{DenyUnsafe, RpcExtension}, polkadot_service::{ BlockT, Error, IdentifyVariant, NewFullParams, OverseerGen, SelectRelayChain, @@ -54,7 +56,7 @@ use { run_manual_seal, EngineCommand, ManualSealParams, }, sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}, - sc_keystore::{LocalKeystore, Keystore}, + sc_keystore::{Keystore, LocalKeystore}, sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool}, service::{Configuration, KeystoreContainer, RpcHandlers, TaskManager}, sp_api::ProvideRuntimeApi, @@ -67,6 +69,10 @@ use { telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}, }; +use crate::dev_rpcs::DevApiServer; + +const PARA_INHERENT_SELECTOR_AUX_KEY: &[u8] = b"__DEV_PARA_INHERENT_SELECTOR"; + pub type FullBackend = service::TFullBackend; pub type FullClient = service::TFullClient< @@ -100,6 +106,8 @@ struct DevDeps { pub deny_unsafe: DenyUnsafe, /// Manual seal command sink pub command_sink: Option>>, + /// Channels for dev rpcs + pub dev_rpc_data: Option>>, } fn create_dev_rpc_extension( @@ -109,6 +117,7 @@ fn create_dev_rpc_extension( chain_spec, deny_unsafe, command_sink: maybe_command_sink, + dev_rpc_data: maybe_dev_rpc_data, }: DevDeps, ) -> Result> where @@ -148,25 +157,36 @@ where io.merge(ManualSeal::new(command_sink).into_rpc())?; } + if let Some(mock_para_inherent_channel) = maybe_dev_rpc_data { + io.merge( + DevRpc { + mock_para_inherent_channel, + } + .into_rpc(), + )?; + } + Ok(io) } /// We use EmptyParachainsInherentDataProvider to insert an empty parachain inherent in the block /// to satisfy runtime -struct EmptyParachainsInherentDataProvider + ProvideRuntimeApi> { +struct EmptyParachainsInherentDataProvider> { pub client: Arc, pub parent: Hash, - pub keystore: KeystorePtr } -use sp_consensus_aura::{inherents::InherentType as AuraInherentType, AURA_ENGINE_ID}; -use sp_runtime::{traits::BlakeTwo256, DigestItem, RuntimeAppPublic}; use polkadot_primitives::BackedCandidate; -use polkadot_primitives::OccupiedCoreAssumption; -use sp_core::H256; use polkadot_primitives::CollatorPair; +use polkadot_primitives::OccupiedCoreAssumption; +use polkadot_primitives::{ + CandidateCommitments, CandidateDescriptor, CommittedCandidateReceipt, CompactStatement, + EncodeAs, SigningContext, ValidityAttestation, +}; +use sp_consensus_aura::{inherents::InherentType as AuraInherentType, AURA_ENGINE_ID}; use sp_core::Pair; -use polkadot_primitives::{CommittedCandidateReceipt, CandidateDescriptor, CandidateCommitments, CompactStatement, EncodeAs, SigningContext, ValidityAttestation}; +use sp_core::H256; +use sp_runtime::{traits::BlakeTwo256, DigestItem, RuntimeAppPublic}; /// Copied from polkadot service just so that this code retains same structure as /// polkadot_service crate. 
struct Basics { @@ -177,17 +197,81 @@ struct Basics { telemetry: Option, } -impl + ProvideRuntimeApi> EmptyParachainsInherentDataProvider where -C::Api: ParachainHost +impl> EmptyParachainsInherentDataProvider { + pub fn new(client: Arc, parent: Hash) -> Self { + EmptyParachainsInherentDataProvider { client, parent } + } + + pub async fn create( + client: Arc, + parent: Hash, + ) -> Result { + let parent_header = match client.header(parent) { + Ok(Some(h)) => h, + Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)), + Err(err) => return Err(InherentError::Blockchain(err)), + }; + + Ok(ParachainsInherentData { + bitfields: Vec::new(), + backed_candidates: Vec::new(), + disputes: Vec::new(), + parent_header, + }) + } +} + +/// Creates new development full node with manual seal +pub fn build_full( + sealing: Sealing, + config: Configuration, + mut params: NewFullParams, +) -> Result { + let is_polkadot = config.chain_spec.is_polkadot(); + + params.overseer_message_channel_capacity_override = params + .overseer_message_channel_capacity_override + .map(move |capacity| { + if is_polkadot { + gum::warn!("Channel capacity should _never_ be tampered with on polkadot!"); + } + capacity + }); + + match config.network.network_backend { + sc_network::config::NetworkBackendType::Libp2p => { + new_full::<_, sc_network::NetworkWorker>(sealing, config, params) + } + sc_network::config::NetworkBackendType::Litep2p => { + new_full::<_, sc_network::Litep2pNetworkBackend>(sealing, config, params) + } + } +} + +/// We use MockParachainsInherentDataProvider to insert an empty parachain inherent in the block +/// to satisfy runtime +struct MockParachainsInherentDataProvider + ProvideRuntimeApi> { + pub client: Arc, + pub parent: Hash, + pub keystore: KeystorePtr, +} + +impl + ProvideRuntimeApi> MockParachainsInherentDataProvider +where + C::Api: ParachainHost, { pub fn new(client: Arc, parent: Hash, keystore: KeystorePtr) -> Self { - EmptyParachainsInherentDataProvider { client, parent, keystore } + MockParachainsInherentDataProvider { + client, + parent, + keystore, + } } pub async fn create( client: Arc, parent: Hash, - keystore: KeystorePtr + keystore: KeystorePtr, ) -> Result { let parent_header_relay = match client.header(parent) { Ok(Some(h)) => h, @@ -195,7 +279,10 @@ C::Api: ParachainHost Err(err) => return Err(InherentError::Blockchain(err)), }; - let parent_hash = client.hash(parent_header_relay.number.saturating_sub(1)).unwrap().unwrap(); + let parent_hash = client + .hash(parent_header_relay.number.saturating_sub(1)) + .unwrap() + .unwrap(); let parent_header = match client.header(parent_hash) { Ok(Some(h)) => h, @@ -209,23 +296,24 @@ C::Api: ParachainHost // we generate a signature for the parachain assigned to that core // To retrieve the validator keys, cal runtime api: - // this following piece of code predicts whether the validator is assigned to a particular + // this following piece of code predicts whether the validator is assigned to a particular // core where a candidate for a parachain needs to be created let runtime_api = client.runtime_api(); let para_authorities = runtime_api.validators(parent_hash).unwrap(); let claim_queue = runtime_api.claim_queue(parent_hash).unwrap(); let (groups, rotation_info) = runtime_api.validator_groups(parent_hash).unwrap(); - let rotations_since_session_start = - (parent_header.number - rotation_info.session_start_block) / rotation_info.group_rotation_frequency; - + let rotations_since_session_start = (parent_header.number + - 
rotation_info.session_start_block) + / rotation_info.group_rotation_frequency; + // Get all the available keys - let available_keys = keystore.keys(polkadot_primitives::PARACHAIN_KEY_TYPE_ID).unwrap(); - - let slot_number = AuraInherentType::from( - u64::from(parent_header.number), - ); - + let available_keys = keystore + .keys(polkadot_primitives::PARACHAIN_KEY_TYPE_ID) + .unwrap(); + + let slot_number = AuraInherentType::from(u64::from(parent_header.number)); + let parachain_mocked_header = sp_runtime::generic::Header:: { parent_hash: Default::default(), number: parent_header.number, @@ -233,68 +321,76 @@ C::Api: ParachainHost extrinsics_root: Default::default(), digest: sp_runtime::generic::Digest { logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot_number.encode())], - } + }, }; let availability_cores = runtime_api.availability_cores(parent_hash).unwrap(); let session_idx = runtime_api.session_index_for_child(parent_hash).unwrap(); let all_validators = runtime_api.validators(parent_hash).unwrap(); let availability_bitvec = availability_bitvec(1, availability_cores.len()); - let signature_ctx = SigningContext { parent_hash: parent, session_index: session_idx, }; - log::info!("availability bitvec is {:?}", availability_bitvec); // we generate the availability bitfield sigs let bitfields: Vec> = all_validators - .iter() - .enumerate() - .map(|(i, public)| { - keystore_sign( - &keystore, - availability_bitvec.clone(), - &signature_ctx, - ValidatorIndex(i as u32), - &public - ).unwrap().unwrap() - }) - .collect(); - - log::info!("bitfields {:?}", bitfields); + .iter() + .enumerate() + .map(|(i, public)| { + keystore_sign( + &keystore, + availability_bitvec.clone(), + &signature_ctx, + ValidatorIndex(i as u32), + &public, + ) + .unwrap() + .unwrap() + }) + .collect(); let collator_pair = CollatorPair::generate().0; let mut backed_cand: Vec> = vec![]; for (core, para) in claim_queue { - let group_assigned_to_core = core.0 + rotations_since_session_start % groups.len() as u32; + let group_assigned_to_core = + core.0 + rotations_since_session_start % groups.len() as u32; let indices_associated_to_core = groups.get(group_assigned_to_core as usize).unwrap(); for index in indices_associated_to_core { let validator_keys_to_find = para_authorities.get(index.0 as usize).unwrap(); // Iterate keys until we find an eligible one, or run out of candidates. 
for type_public_pair in &available_keys { - if let Ok(validator) = polkadot_primitives::ValidatorId::from_slice(&type_public_pair) { + if let Ok(validator) = + polkadot_primitives::ValidatorId::from_slice(&type_public_pair) + { if validator_keys_to_find == &validator { - let persisted_validation_data = runtime_api.persisted_validation_data(parent_hash, para[0], OccupiedCoreAssumption::Included).unwrap().unwrap(); - log::info!("parent_hash is {:?}", parent_hash); - - log::info!("validation data is {:?}", persisted_validation_data); - log::info!("validation data encoded is {:?}", persisted_validation_data.encode()); - log::info!("parent number is is {:?}", parent_header.number); + let persisted_validation_data = runtime_api + .persisted_validation_data( + parent_hash, + para[0], + OccupiedCoreAssumption::Included, + ) + .unwrap() + .unwrap(); let persisted_validation_data_hash = persisted_validation_data.hash(); - let validation_code_hash = runtime_api.validation_code_hash(parent_hash, para[0], OccupiedCoreAssumption::Included).unwrap().unwrap(); + let validation_code_hash = runtime_api + .validation_code_hash( + parent_hash, + para[0], + OccupiedCoreAssumption::Included, + ) + .unwrap() + .unwrap(); let pov_hash = Default::default(); - let payload = - polkadot_primitives::collator_signature_payload( - &parent_hash, - ¶[0], - &persisted_validation_data_hash, - &pov_hash, - &validation_code_hash, - ); + let payload = polkadot_primitives::collator_signature_payload( + &parent_hash, + ¶[0], + &persisted_validation_data_hash, + &pov_hash, + &validation_code_hash, + ); let collator_signature = collator_pair.sign(&payload); - let prev_head = persisted_validation_data.parent_head; let candidate = CommittedCandidateReceipt:: { descriptor: CandidateDescriptor:: { para_id: para[0], @@ -324,15 +420,16 @@ C::Api: ParachainHost session_index: session_idx, }; - log::info!("before sig"); - let signature = keystore_sign( &keystore, payload, &signature_ctx, *index, - &validator - ).unwrap().unwrap().benchmark_signature(); + &validator, + ) + .unwrap() + .unwrap() + .benchmark_signature(); log::info!("after sig"); @@ -344,21 +441,12 @@ C::Api: ParachainHost bitvec::bitvec![u8, bitvec::order::Lsb0; 1; indices_associated_to_core.len()], Some(core), )); - - - - - // Only in this case, we need to create a candidate - log::info!("found public key"); - log::info!("validity_votes {:?}", validity_votes.clone()); } } } } } - log::info!("backed_cand {:?}", backed_cand); - Ok(ParachainsInherentData { bitfields: bitfields, backed_candidates: backed_cand, @@ -370,20 +458,42 @@ C::Api: ParachainHost #[async_trait::async_trait] impl + ProvideRuntimeApi> sp_inherents::InherentDataProvider - for EmptyParachainsInherentDataProvider - - where C::Api: ParachainHost + for MockParachainsInherentDataProvider +where + C::Api: ParachainHost, + C: AuxStore, { async fn provide_inherent_data( &self, dst_inherent_data: &mut sp_inherents::InherentData, ) -> Result<(), sp_inherents::Error> { - let inherent_data = - EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent, self.keystore.clone()) - .await - .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?; + let maybe_para_selector = self + .client + .get_aux(PARA_INHERENT_SELECTOR_AUX_KEY) + .expect("Should be able to query aux storage; qed"); + + let inherent_data = { + if let Some(aux) = maybe_para_selector { + if aux == true.encode() { + MockParachainsInherentDataProvider::create( + self.client.clone(), + self.parent, + self.keystore.clone(), + ) + .await 
+ .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } else { + EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } + } else { + EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } + }; - log::info!("inherent data {:?}", inherent_data); dst_inherent_data.put_data( polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER, &inherent_data, @@ -400,33 +510,6 @@ impl + ProvideRuntimeApi> sp_inherents::InherentD } } -/// Creates new development full node with manual seal -pub fn build_full( - sealing: Sealing, - config: Configuration, - mut params: NewFullParams, -) -> Result { - let is_polkadot = config.chain_spec.is_polkadot(); - - params.overseer_message_channel_capacity_override = params - .overseer_message_channel_capacity_override - .map(move |capacity| { - if is_polkadot { - gum::warn!("Channel capacity should _never_ be tampered with on polkadot!"); - } - capacity - }); - - match config.network.network_backend { - sc_network::config::NetworkBackendType::Libp2p => { - new_full::<_, sc_network::NetworkWorker>(sealing, config, params) - } - sc_network::config::NetworkBackendType::Litep2p => { - new_full::<_, sc_network::Litep2pNetworkBackend>(sealing, config, params) - } - } -} - /// We store past timestamp we created in the aux storage, which enable us to return timestamp which is increased by /// slot duration from previous timestamp or current timestamp if in reality more time is passed. fn get_next_timestamp( @@ -503,6 +586,10 @@ fn new_full< let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network); + // Create channels for mocked parachain candidates. 
+ let (downward_mock_para_inherent_sender, downward_mock_para_inherent_receiver) = + flume::bounded::>(100); + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = service::build_network(service::BuildNetworkParams { config: &config, @@ -615,13 +702,29 @@ fn new_full< create_inherent_data_providers: move |parent, ()| { let client_clone = client_clone.clone(); let keystore = keystore_clone.clone(); + let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone(); async move { - let parachain = - EmptyParachainsInherentDataProvider::new( - client_clone.clone(), - parent, - keystore - ); + + let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone(); + // here we only take the last one + let para_inherent_decider_messages: Vec> = downward_mock_para_inherent_receiver.drain().collect(); + + // If there is a value to be updated, we update it + if let Some(value) = para_inherent_decider_messages.last() { + client_clone + .insert_aux( + &[(PARA_INHERENT_SELECTOR_AUX_KEY, value.as_slice())], + &[], + ) + .expect("Should be able to write to aux storage; qed"); + + } + + let parachain = MockParachainsInherentDataProvider::new( + client_clone.clone(), + parent, + keystore + ); let timestamp = get_next_timestamp(client_clone, slot_duration); @@ -639,6 +742,13 @@ fn new_full< ); } + // We dont need the flume receiver if we are not a validator + let dev_rpc_data = if role.clone().is_authority() { + Some(downward_mock_para_inherent_sender) + } else { + None + }; + let rpc_extensions_builder = { let client = client.clone(); let transaction_pool = transaction_pool.clone(); @@ -653,6 +763,7 @@ fn new_full< chain_spec: chain_spec.cloned_box(), deny_unsafe, command_sink: command_sink.clone(), + dev_rpc_data: dev_rpc_data.clone(), }; create_dev_rpc_extension(deps).map_err(Into::into) @@ -816,7 +927,7 @@ fn new_partial_basics( }) } -use polkadot_primitives::{ValidatorIndex, ValidatorId, UncheckedSigned, AvailabilityBitfield}; +use polkadot_primitives::{AvailabilityBitfield, UncheckedSigned, ValidatorId, ValidatorIndex}; use sp_keystore::Error as KeystoreError; fn keystore_sign( keystore: &KeystorePtr, @@ -824,18 +935,18 @@ fn keystore_sign( context: &SigningContext, validator_index: ValidatorIndex, key: &ValidatorId, -) -> Result>, KeystoreError> { +) -> Result>, KeystoreError> { let data = payload_data(&payload, context); - let signature = - keystore.sr25519_sign(ValidatorId::ID, key.as_ref(), &data)?.map(|sig| UncheckedSigned::new( - payload, - validator_index, - sig.into(), - )); + let signature = keystore + .sr25519_sign(ValidatorId::ID, key.as_ref(), &data)? + .map(|sig| UncheckedSigned::new(payload, validator_index, sig.into())); Ok(signature) } -fn payload_data(payload: &Payload, context: &SigningContext) -> Vec { +fn payload_data( + payload: &Payload, + context: &SigningContext, +) -> Vec { // equivalent to (`real_payload`, context).encode() let mut out = payload.encode_as(); out.extend(context.encode()); @@ -843,16 +954,16 @@ fn payload_data(payload: &Payload, context: &Signing } /// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index - /// that is concluding and `cores` is the total number of cores in the system. 
- fn availability_bitvec(used_cores: usize, cores: usize) -> AvailabilityBitfield { - let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0]; - for i in 0..cores { - if i < used_cores { - bitfields.push(true); - } else { - bitfields.push(false) - } +/// that is concluding and `cores` is the total number of cores in the system. +fn availability_bitvec(used_cores: usize, cores: usize) -> AvailabilityBitfield { + let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0]; + for i in 0..cores { + if i < used_cores { + bitfields.push(true); + } else { + bitfields.push(false) } + } - bitfields.into() - } \ No newline at end of file + bitfields.into() +} diff --git a/solo-chains/node/tanssi-relay-service/src/lib.rs b/solo-chains/node/tanssi-relay-service/src/lib.rs index 0ad8f1d95..46a0e5030 100644 --- a/solo-chains/node/tanssi-relay-service/src/lib.rs +++ b/solo-chains/node/tanssi-relay-service/src/lib.rs @@ -17,3 +17,5 @@ pub mod chain_spec; pub mod dev_service; + +pub mod dev_rpcs;
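Usage note (not part of the patch): the new `mock_enableParaInherentCandidate` / `mock_disableParaInherentCandidate` methods are ordinary JSON-RPC calls, so any client can toggle the mocked candidate injection on a running dev node. A minimal sketch using jsonrpsee's HTTP client follows; the endpoint address, the tokio runtime, and the surrounding test harness are assumptions, not something this patch sets up.

use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical RPC endpoint of a locally running dev relay node.
    let client = HttpClientBuilder::default().build("http://127.0.0.1:9944")?;

    // From here on, blocks sealed by the node include mocked parachain candidates.
    client
        .request::<(), _>("mock_enableParaInherentCandidate", rpc_params![])
        .await?;

    // ... author some blocks, e.g. via manual seal's `engine_createBlock` ...

    // Switch back to the empty parachains inherent.
    client
        .request::<(), _>("mock_disableParaInherentCandidate", rpc_params![])
        .await?;

    Ok(())
}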
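The selector itself is just a SCALE-encoded bool travelling over a bounded flume channel: the inherent-data closure drains whatever accumulated since the last block, keeps only the most recent message, and persists it under `PARA_INHERENT_SELECTOR_AUX_KEY`. A standalone sketch of that drain-and-keep-last behaviour (names and the final assert are illustrative only, not code from the patch):

use codec::{Decode, Encode};

fn main() {
    let (sender, receiver) = flume::bounded::<Vec<u8>>(100);

    // Two toggles arrive between two sealed blocks; the later one should win.
    sender.send(true.encode()).unwrap();
    sender.send(false.encode()).unwrap();

    // Mirrors the create_inherent_data_providers closure: drain all pending
    // messages and keep only the last value.
    let pending: Vec<Vec<u8>> = receiver.drain().collect();
    if let Some(raw) = pending.last() {
        // The dev service writes `raw` to aux storage and later compares it
        // against `true.encode()`; decoding it back gives the same answer.
        let enabled = bool::decode(&mut raw.as_slice()).expect("valid SCALE-encoded bool");
        assert!(!enabled);
    }
}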