Partially merge: Bounded election buckets with dynamic reprioritizati…
simpago committed Jul 22, 2024
1 parent 4678620 commit 3ab67dd
Showing 9 changed files with 110 additions and 450 deletions.
31 changes: 31 additions & 0 deletions nano/node/nodeconfig.cpp
@@ -25,6 +25,7 @@ rsnano::NodeConfigDto to_node_config_dto (nano::node_config const & config)
rsnano::NodeConfigDto dto;
dto.optimistic_scheduler = config.optimistic_scheduler.into_dto ();
dto.hinted_scheduler = config.hinted_scheduler.into_dto ();
dto.priority_bucket = config.priority_bucket.into_dto ();
dto.peering_port = config.peering_port.value_or (0);
dto.peering_port_defined = config.peering_port.has_value ();
dto.bootstrap_fraction_numerator = config.bootstrap_fraction_numerator;
@@ -159,6 +160,7 @@ void nano::node_config::load_dto (rsnano::NodeConfigDto & dto)
}
optimistic_scheduler.load_dto (dto.optimistic_scheduler);
hinted_scheduler.load_dto (dto.hinted_scheduler);
priority_bucket = nano::priority_bucket_config{dto.priority_bucket};
bootstrap_fraction_numerator = dto.bootstrap_fraction_numerator;
bootstrap_ascending.load_dto (dto.bootstrap_ascending);
bootstrap_server.load_dto (dto.bootstrap_server);
@@ -303,6 +305,12 @@ nano::error nano::node_config::deserialize_toml (nano::tomlconfig & toml)
optimistic_scheduler.deserialize (config_l);
}

if (toml.has_key ("priority_bucket"))
{
auto config_l = toml.get_required_child ("priority_bucket");
priority_bucket.deserialize (config_l);
}

if (toml.has_key ("hinted_scheduler"))
{
auto config_l = toml.get_required_child ("hinted_scheduler");
@@ -1084,3 +1092,26 @@ nano::error nano::monitor_config::deserialize (nano::tomlconfig & toml)
return toml.get_error ();
}

nano::priority_bucket_config::priority_bucket_config(rsnano::PriorityBucketConfigDto const & dto) :
max_blocks{ dto.max_blocks },
reserved_elections{ dto.reserved_elections },
max_elections{ dto.max_elections }
{}


rsnano::PriorityBucketConfigDto nano::priority_bucket_config::into_dto () const{
return {
max_blocks,
reserved_elections,
max_elections
};
}

nano::error nano::priority_bucket_config::deserialize (nano::tomlconfig & toml)
{
toml.get ("max_blocks", max_blocks);
toml.get ("reserved_elections", reserved_elections);
toml.get ("max_elections", max_elections);

return toml.get_error ();
}
20 changes: 20 additions & 0 deletions nano/node/nodeconfig.hpp
@@ -95,6 +95,25 @@ class monitor_config final
std::chrono::seconds interval{ 60s };
};

class priority_bucket_config final
{
public:
priority_bucket_config() = default;
priority_bucket_config(rsnano::PriorityBucketConfigDto const & dto);
rsnano::PriorityBucketConfigDto into_dto () const;
nano::error deserialize (nano::tomlconfig & toml);

public:
// Maximum number of blocks to sort by priority per bucket.
std::size_t max_blocks{ 1024 * 8 };

// Number of guaranteed slots per bucket available for election activation.
std::size_t reserved_elections{ 100 };

// Maximum number of slots per bucket available for election activation if the active election count is below the configured limit. (node.active_elections.size)
std::size_t max_elections{ 150 };
};

/**
* Node configuration
*/
@@ -117,6 +136,7 @@ class node_config
std::optional<uint16_t> peering_port{};
nano::scheduler::optimistic_config optimistic_scheduler;
nano::scheduler::hinted_config hinted_scheduler;
nano::priority_bucket_config priority_bucket;
std::vector<std::pair<std::string, uint16_t>> work_peers;
std::vector<std::pair<std::string, uint16_t>> secondary_work_peers;
std::vector<std::string> preconfigured_peers;
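
The three knobs introduced in priority_bucket_config above mirror the Rust-side PriorityBucketConfig (see rust/node/src/consensus/bucket.rs below). A minimal sketch of overriding one value while keeping the defaults shown here (max_blocks = 1024 * 8, reserved_elections = 100, max_elections = 150), assuming only the public fields and Default impl added in this commit; the function name is illustrative:

use rsnano_node::consensus::PriorityBucketConfig;

// Keep the default max_blocks and reserved_elections, but allow up to
// 300 elections per bucket whenever the node-wide limit permits it.
fn tuned_priority_bucket() -> PriorityBucketConfig {
    PriorityBucketConfig {
        max_elections: 300,
        ..Default::default()
    }
}
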
31 changes: 31 additions & 0 deletions rust/ffi/src/config/node_config.rs
@@ -21,6 +21,7 @@ use rsnano_node::{
block_processing::LocalBlockBroadcasterConfig,
cementation::ConfirmingSetConfig,
config::{MonitorConfig, NodeConfig, Peer},
consensus::PriorityBucketConfig,
transport::{MessageProcessorConfig, TcpConfig},
NetworkParams,
};
@@ -35,6 +36,7 @@
pub peering_port: u16,
pub optimistic_scheduler: OptimisticSchedulerConfigDto,
pub hinted_scheduler: HintedSchedulerConfigDto,
pub priority_bucket: PriorityBucketConfigDto,
pub peering_port_defined: bool,
pub bootstrap_fraction_numerator: u32,
pub receive_minimum: [u8; 16],
@@ -205,6 +207,7 @@ pub fn fill_node_config_dto(dto: &mut NodeConfigDto, cfg: &NodeConfig) {
dto.peering_port = cfg.peering_port.unwrap_or_default();
dto.optimistic_scheduler = (&cfg.optimistic_scheduler).into();
dto.hinted_scheduler = (&cfg.hinted_scheduler).into();
dto.priority_bucket = (&cfg.priority_bucket).into();
dto.peering_port_defined = cfg.peering_port.is_some();
dto.bootstrap_fraction_numerator = cfg.bootstrap_fraction_numerator;
dto.receive_minimum = cfg.receive_minimum.to_be_bytes();
@@ -386,6 +389,7 @@ impl TryFrom<&NodeConfigDto> for NodeConfig {
},
optimistic_scheduler: (&value.optimistic_scheduler).into(),
hinted_scheduler: (&value.hinted_scheduler).into(),
priority_bucket: (&value.priority_bucket).into(),
bootstrap_fraction_numerator: value.bootstrap_fraction_numerator,
receive_minimum: Amount::from_be_bytes(value.receive_minimum),
online_weight_minimum: Amount::from_be_bytes(value.online_weight_minimum),
@@ -561,3 +565,30 @@ impl From<&ConfirmingSetConfig> for ConfirmingSetConfigDto {
}
}
}

#[repr(C)]
pub struct PriorityBucketConfigDto {
pub max_blocks: usize,
pub reserved_elections: usize,
pub max_elections: usize,
}

impl From<&PriorityBucketConfigDto> for PriorityBucketConfig {
fn from(value: &PriorityBucketConfigDto) -> Self {
Self {
max_blocks: value.max_blocks,
reserved_elections: value.reserved_elections,
max_elections: value.max_elections,
}
}
}

impl From<&PriorityBucketConfig> for PriorityBucketConfigDto {
fn from(value: &PriorityBucketConfig) -> Self {
Self {
max_blocks: value.max_blocks,
reserved_elections: value.reserved_elections,
max_elections: value.max_elections,
}
}
}
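
Since PriorityBucketConfigDto is #[repr(C)], the same struct layout is shared with the C++ side, and the two From impls above are all that is needed to cross the boundary. A minimal round-trip sketch, written as if it sat in this same module so both types are in scope; the helper and test names are illustrative only:

fn round_trip(config: &PriorityBucketConfig) -> PriorityBucketConfig {
    // Rust config -> repr(C) DTO handed over FFI -> Rust config again.
    let dto: PriorityBucketConfigDto = config.into();
    (&dto).into()
}

#[test]
fn priority_bucket_config_round_trip() {
    let original = PriorityBucketConfig::default();
    let restored = round_trip(&original);
    assert_eq!(restored.max_blocks, original.max_blocks);
    assert_eq!(restored.reserved_elections, original.reserved_elections);
    assert_eq!(restored.max_elections, original.max_elections);
}
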
9 changes: 8 additions & 1 deletion rust/node/src/config/node_config.rs
@@ -7,7 +7,8 @@ use crate::{
bootstrap::{BootstrapInitiatorConfig, BootstrapServerConfig},
cementation::ConfirmingSetConfig,
consensus::{
ActiveElectionsConfig, RequestAggregatorConfig, VoteCacheConfig, VoteProcessorConfig,
ActiveElectionsConfig, PriorityBucketConfig, RequestAggregatorConfig, VoteCacheConfig,
VoteProcessorConfig,
},
stats::StatsConfig,
transport::{MessageProcessorConfig, TcpConfig},
@@ -37,6 +38,7 @@ pub struct NodeConfig {
pub peering_port: Option<u16>,
pub optimistic_scheduler: OptimisticSchedulerConfig,
pub hinted_scheduler: HintedSchedulerConfig,
pub priority_bucket: PriorityBucketConfig,
pub bootstrap_fraction_numerator: u32,
pub receive_minimum: Amount,
pub online_weight_minimum: Amount,
@@ -314,6 +316,7 @@ impl NodeConfig {
} else {
HintedSchedulerConfig::default()
},
priority_bucket: Default::default(),
vote_cache: Default::default(),
active_elections: Default::default(),
rep_crawler_query_timeout: if network_params.network.is_dev_network() {
@@ -502,6 +505,10 @@ impl NodeConfig {
self.optimistic_scheduler.serialize_toml(opt)
})?;

toml.put_child("priority_bucket", &mut |opt| {
self.priority_bucket.serialize_toml(opt)
})?;

toml.put_child("bootstrap_ascending", &mut |writer| {
self.bootstrap_ascending.serialize_toml(writer)
})?;
31 changes: 17 additions & 14 deletions rust/node/src/consensus/bucket.rs
@@ -3,23 +3,23 @@ use crate::{
consensus::ActiveElectionsExt,
stats::{DetailType, StatType, Stats},
};
use rsnano_core::{Amount, BlockEnum, QualifiedRoot};
use rsnano_core::{utils::TomlWriter, Amount, BlockEnum, QualifiedRoot};
use std::{
cmp::Ordering,
collections::{BTreeMap, BTreeSet, HashMap, VecDeque},
sync::{Arc, Mutex},
};

#[derive(Clone)]
pub(crate) struct PriorityBucketConfig {
pub struct PriorityBucketConfig {
/// Maximum number of blocks to sort by priority per bucket.
max_blocks: usize,
pub max_blocks: usize,

/// Number of guaranteed slots per bucket available for election activation.
reserved_elections: usize,
pub reserved_elections: usize,

/// Maximum number of slots per bucket available for election activation if the active election count is below the configured limit. (node.active_elections.size)
max_elections: usize,
pub max_elections: usize,
}

impl Default for PriorityBucketConfig {
@@ -32,6 +32,18 @@ impl Default for PriorityBucketConfig {
}
}

impl PriorityBucketConfig {
pub(crate) fn serialize_toml(&self, toml: &mut dyn TomlWriter) -> anyhow::Result<()> {
toml.put_usize(
"max_blocks",
self.max_blocks,
"Maximum number of blocks to sort by priority per bucket. \nType: uint64",
)?;
toml.put_usize ("reserved_elections", self.reserved_elections, "Number of guaranteed slots per bucket available for election activation. \nType: uint64")?;
toml.put_usize ("max_elections", self.max_elections, "Maximum number of slots per bucket available for election activation if the active election count is below the configured limit. \nType: uint64")
}
}

type Priority = u64;

/// A struct which holds an ordered set of blocks to be scheduled, ordered by their block arrival time
@@ -134,18 +146,9 @@ impl NewBucket {
self.data.lock().unwrap().queue.len()
}

pub fn is_empty(&self) -> bool {
self.len() == 0
}

pub fn election_count(&self) -> usize {
self.data.lock().unwrap().elections.len()
}

fn blocks(&self) -> VecDeque<Arc<BlockEnum>> {
let guard = self.data.lock().unwrap();
guard.queue.iter().map(|i| i.block.clone()).collect()
}
}

pub(crate) trait BucketExt {
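
For reference, one plausible reading of how reserved_elections and max_elections interact, based only on the field documentation above: each bucket always gets its reserved slots, and may use up to max_elections only while the node-wide active-election count is below its configured limit. The helper below is a hypothetical illustration, not part of this commit:

// Hypothetical sketch of the documented limits; not part of this commit.
fn can_activate_election(
    config: &PriorityBucketConfig,
    bucket_elections: usize,
    node_below_active_limit: bool, // node.active_elections.size below its limit
) -> bool {
    bucket_elections < config.reserved_elections
        || (node_below_active_limit && bucket_elections < config.max_elections)
}
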
4 more changed files not shown.
