Skip to content

Commit

Permalink
fix(watcher): register before expiry and use network consts (#1128)
Browse files Browse the repository at this point in the history
Description
---
* Send a VN registration transaction one epoch (10 blocks on
`esmeralda`) before it is due to expire, rather than reacting to not
being in the validator set
* Use consensus constants received from L1 as part of the check rather
than using hardcoded ones for the `esmeralda` network to generalize the
solution for any network

How Has This Been Tested?
---
1. Run `tari_swarm_daemon` and `tari_watcher`
2. Observe the process sending a registration tx
3. If the registration expires at block $B_E$, observe warning(s) from
$B_E - 100$ (default value) and up
4. Mine until any block in $\left[B_E - 10, B_E - 1\right]$ is reached,
and observe the registration tx sent
  • Loading branch information
therealdannzor committed Sep 3, 2024
1 parent d85bf65 commit f91bcd9
Show file tree
Hide file tree
Showing 9 changed files with 254 additions and 150 deletions.
10 changes: 8 additions & 2 deletions applications/tari_watcher/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,9 @@

### Quickstart

Initialize the project with `tari_watcher init` and start it with `tari_watcher run`. Edit the newly generated `config.toml` to enable notifications on Mattermost and Telegram. Make sure to have started up `tari_validator_node` once previously to have a node directory set up, default is `tari_validator_node -- -b data/vn1`.
Initialize the project with `tari_watcher init` and start it with `tari_watcher start`. Edit the newly generated `config.toml` to enable notifications on Mattermost and Telegram. Make sure to have started up `tari_validator_node` once previously to have a node directory set up, default is `tari_validator_node -- -b data/vn1`.

### Setup
### Config and Setup

The default values used (see `constants.rs`) when running the project without any flags:
```
Expand All @@ -24,6 +24,12 @@ The default values used (see `constants.rs`) when running the project without an
- DEFAULT_BASE_WALLET_GRPC_ADDRESS: default is Tari swarm localhost and port
```

The two main configuration settings for the watcher (default `true`):
```
- auto_register: automatically re-register the node
- auto_restart: automatically restart the node if it goes down
```

### Project

```
Expand Down
2 changes: 0 additions & 2 deletions applications/tari_watcher/src/constants.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,6 @@

use tokio::time::Duration;

pub const CONSENSUS_CONSTANT_REGISTRATION_DURATION: u64 = 1000; // in blocks: 100 epochs * 10 blocks/epoch

pub const DEFAULT_MAIN_PROJECT_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/../../");
pub const DEFAULT_WATCHER_CONFIG_PATH: &str = "data/watcher/config.toml";
pub const DEFAULT_VALIDATOR_PID_PATH: &str = "data/watcher/validator.pid";
Expand Down
30 changes: 28 additions & 2 deletions applications/tari_watcher/src/helpers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,13 @@

use std::path::PathBuf;

use minotari_app_grpc::tari_rpc::GetActiveValidatorNodesResponse;
use minotari_app_grpc::tari_rpc::{ConsensusConstants, GetActiveValidatorNodesResponse};
use tari_common_types::types::PublicKey;
use tari_core::transactions::transaction_components::ValidatorNodeSignature;
use tari_crypto::{ristretto::RistrettoPublicKey, tari_utilities::ByteArray};
use tokio::fs;

use crate::config::Config;
use crate::{config::Config, constants::DEFAULT_THRESHOLD_WARN_EXPIRATION};

pub async fn read_config_file(path: PathBuf) -> anyhow::Result<Config> {
let content = fs::read_to_string(&path).await.map_err(|_| {
Expand Down Expand Up @@ -48,3 +48,29 @@ pub fn to_vn_public_keys(vns: Vec<GetActiveValidatorNodesResponse>) -> Vec<Publi
/// Checks whether `needle` appears among the given validator node public keys.
pub fn contains_key(vns: Vec<RistrettoPublicKey>, needle: PublicKey) -> bool {
    for vn in &vns {
        if *vn == needle {
            return true;
        }
    }
    false
}

/// Returns `true` when the node's registration is one epoch (or less) away from
/// expiring, i.e. it is time to submit a new registration transaction.
///
/// Returns `false` if no registration has been made in this session yet
/// (`last_registered_block` is `None`).
pub fn is_close_to_expiry(
    constants: ConsensusConstants,
    current_block: u64,
    last_registered_block: Option<u64>,
) -> bool {
    let epoch_length = constants.epoch_length;
    // total number of blocks a registration remains valid for; saturating
    // arithmetic guards against wrap-around on pathological L1 constants
    let registration_duration = constants
        .validator_node_validity_period
        .saturating_mul(epoch_length);
    // if we haven't registered yet in this session there is nothing to renew;
    // otherwise check if the current block is an epoch or less away from expiry
    last_registered_block.map_or(false, |registered_at| {
        current_block.saturating_add(epoch_length) >= registered_at.saturating_add(registration_duration)
    })
}

/// Returns `true` when the registration is within `DEFAULT_THRESHOLD_WARN_EXPIRATION`
/// blocks of expiring, so the watcher can emit early warnings before renewal is due.
pub fn is_warning_close_to_expiry(
    constants: ConsensusConstants,
    current_block: u64,
    last_registered_block: u64,
) -> bool {
    // total number of blocks a registration remains valid for; saturating
    // arithmetic guards against wrap-around on pathological L1 constants
    let registration_duration = constants
        .epoch_length
        .saturating_mul(constants.validator_node_validity_period);
    // true once we have approached the expiration warning threshold
    current_block.saturating_add(DEFAULT_THRESHOLD_WARN_EXPIRATION)
        >= last_registered_block.saturating_add(registration_duration)
}
35 changes: 23 additions & 12 deletions applications/tari_watcher/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,14 @@
use anyhow::{anyhow, bail, Context};
use registration::registration_loop;
use tari_shutdown::{Shutdown, ShutdownSignal};
use tokio::{fs, task};
use tokio::{fs, task::JoinHandle};

use crate::{
cli::{Cli, Commands},
config::{get_base_config, Config},
helpers::read_config_file,
logger::init_logger,
manager::{ManagerHandle, ProcessManager},
manager::{start_receivers, ManagerHandle, ProcessManager},
shutdown::exit_signal,
};

Expand Down Expand Up @@ -74,14 +74,16 @@ async fn main() -> anyhow::Result<()> {
async fn start(config: Config) -> anyhow::Result<()> {
let shutdown = Shutdown::new();
let signal = shutdown.to_signal().select(exit_signal()?);
let (task_handle, manager_handle) = spawn(config.clone(), shutdown.to_signal(), shutdown).await;
let handlers = spawn_manager(config.clone(), shutdown.to_signal(), shutdown).await?;
let manager_handle = handlers.manager;
let task_handle = handlers.task;

tokio::select! {
_ = signal => {
log::info!("Shutting down");
},
result = task_handle => {
result??;
result?;
log::info!("Process manager exited");
},
_ = async {
Expand All @@ -92,12 +94,21 @@ async fn start(config: Config) -> anyhow::Result<()> {
Ok(())
}

async fn spawn(
config: Config,
shutdown: ShutdownSignal,
trigger: Shutdown,
) -> (task::JoinHandle<anyhow::Result<()>>, ManagerHandle) {
let (manager, manager_handle) = ProcessManager::new(config, shutdown, trigger);
let task_handle = tokio::spawn(manager.start());
(task_handle, manager_handle)
// Handles produced when spawning the process manager: a request handle for
// interacting with the manager and the join handle of its background task.
struct Handlers {
    // channel-backed handle used to issue requests (e.g. tip info, consensus
    // constants) to the running process manager
    manager: ManagerHandle,
    // join handle of the spawned manager task; completion signals manager exit
    task: JoinHandle<()>,
}

/// Creates the process manager, starts its request handler and the log/alert
/// receivers, and returns the handles the caller needs to drive and await it.
async fn spawn_manager(config: Config, shutdown: ShutdownSignal, trigger: Shutdown) -> anyhow::Result<Handlers> {
    let (manager, mut handle) = ProcessManager::new(config, shutdown, trigger);
    let channels = manager.start_request_handler().await?;
    let tip_status = handle.get_tip_info().await?;
    // in the case the consensus constants have changed since the genesis block, use the latest ones
    let constants = handle.get_consensus_constants(tip_status.height()).await?;
    start_receivers(channels.rx_log, channels.rx_alert, channels.cfg_alert, constants).await;

    Ok(Handlers {
        manager: handle,
        task: channels.task,
    })
}
Loading

0 comments on commit f91bcd9

Please sign in to comment.