From b12fd321b2ddf3fde6e81b2d00ded6a401df2a11 Mon Sep 17 00:00:00 2001 From: Chiara Seim Date: Wed, 26 Jul 2023 17:43:58 -0700 Subject: [PATCH 1/5] Send usage data in 1-month portions This change is meant to speed up the checkin loop when routers have a lot of data to upload for usage history. We now only iterate through and send up to a month's worth of hours per checkin. --- rita_client/src/operator_update/mod.rs | 99 +++++++++++++++++++++----- 1 file changed, 80 insertions(+), 19 deletions(-) diff --git a/rita_client/src/operator_update/mod.rs b/rita_client/src/operator_update/mod.rs index d46a4c3e5..08c883cf4 100644 --- a/rita_client/src/operator_update/mod.rs +++ b/rita_client/src/operator_update/mod.rs @@ -19,6 +19,7 @@ use num256::Uint256; use rita_common::rita_loop::is_gateway; use rita_common::tunnel_manager::neighbor_status::get_neighbor_status; use rita_common::tunnel_manager::shaping::flag_reset_shaper; +use rita_common::usage_tracker::UsageHour as RCUsageHour; use rita_common::usage_tracker::UsageType::{Client, Relay}; use rita_common::usage_tracker::{get_current_hour, get_usage_data}; use rita_common::utils::option_convert; @@ -168,48 +169,69 @@ pub async fn operator_update(ops_last_seen_usage_hour: Option) -> u64 { } }; let last_seen_hour = ops_last_seen_usage_hour.unwrap_or(0); - let mut hours_to_send: u64 = 0; - // first check whats the last saved hr, send everything from that hr on - // but, we need to save a var of the last x hrs not seen, and load that up with the next checkin cycle if >0. - if current_hour - last_seen_hour > 0 { - hours_to_send = current_hour - last_seen_hour; - } + // We check that the difference is >1 because we leave a 1 hour buffer to prevent from sending over an incomplete current hour + let send_hours = current_hour - last_seen_hour > 1; let mut usage_tracker_data: Option = None; - // only deal with this if we actually do need to send some hours // if ops_last_seen_usage_hour is a None the thread has restarted and we are waiting for ops to tell us how much - // data we need to send, which will be populated with the next checkin cycle - if hours_to_send != 0 && ops_last_seen_usage_hour.is_some() { + // data we need to send, which will be populated with the next checkin cycle. 730 is the average numbers of hours + // in a month, and we only send 1 month at a time if ops is requesting the full usage history. + if send_hours && ops_last_seen_usage_hour.is_some() { let mut usage_data_client = get_usage_data(Client); let mut usage_data_relay = get_usage_data(Relay); let mut new_client_data: VecDeque = VecDeque::new(); let mut new_relay_data: VecDeque = VecDeque::new(); - for _hour in 0..hours_to_send { - // pop front, add to front of new vecdeque if exists. - while let Some(data) = usage_data_client.pop_front() { - if data.index > last_seen_hour { + + // ops is expecting data as [oldest..newest] but now we sent newest .. 
oldest + let client_oldest = match usage_data_client.back() { + Some(x) => x.index, + None => 0, + }; + let relay_oldest = match usage_data_relay.back() { + Some(x) => x.index, + None => 0, + }; + // if the last seen hour is earlier (lower) than our earliest saved index, we are uploading entire history so don't worry about position since last seen + if last_seen_hour < client_oldest { + new_client_data = iterate_month_usage_data(usage_data_client); + } else if client_oldest == last_seen_hour + 2 { + // we are simply updating the second most recent (because the most recent may be incomplete until it rolls over) + if let Some(data) = usage_data_client.get(1) { + if data.index == last_seen_hour + 1 { new_client_data.push_front(UsageHour { up: data.up, down: data.down, price: data.price, index: data.index, }); - break; } } - - // pop front, add to front of new vecdeque if exists. - while let Some(data) = usage_data_relay.pop_front() { - if data.index > last_seen_hour { + } else { + // binary search until we find the last seen hour or hour in the vecdeque with index before that (as in cases of gaps in data) + let pos = find_position_since_last_seen(&usage_data_client, last_seen_hour); + // drain all older data as it has been uploaded to ops already + usage_data_client.drain(pos..); + new_client_data = iterate_month_usage_data(usage_data_client); + } + // then repeat for relay data + if last_seen_hour < relay_oldest { + new_relay_data = iterate_month_usage_data(usage_data_relay); + } else if relay_oldest == last_seen_hour + 2 { + if let Some(data) = usage_data_relay.get(1) { + if data.index == last_seen_hour + 1 { new_relay_data.push_front(UsageHour { up: data.up, down: data.down, price: data.price, index: data.index, }); - break; } } + } else { + let pos = find_position_since_last_seen(&usage_data_relay, last_seen_hour); + usage_data_relay.drain(pos..); + new_relay_data = iterate_month_usage_data(usage_data_relay); } + usage_tracker_data = Some(UsageTracker { last_save_hour: current_hour, client_bandwidth: new_client_data, @@ -680,6 +702,45 @@ fn contains_forbidden_key(map: Map, forbidden_values: &[&str]) -> false } +/// Binary search the usage data until we get close enough to the last seen(either exact match or the next oldest +/// data). usage data is saved newest at the front, oldest at the back, meaning largest indexes at the front. Returns +/// the index of the position from which to iterate. 
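+///
+/// A sketch of the intended behavior on hypothetical data (the numbers below are
+/// illustrative only, not real router history):
+///
+/// ```ignore
+/// // indexes stored newest-first: [12, 11, 10, 8, 7]; ops last saw hour 9,
+/// // which is missing locally, i.e. a gap in the saved data.
+/// // find_position_since_last_seen returns 3, the position of hour 8 (the newest
+/// // entry at or below last_seen), so the caller's drain(3..) discards hours 8
+/// // and 7, which were already uploaded, and keeps 12, 11 and 10 to send.
+/// ```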
+fn find_position_since_last_seen(usage: &VecDeque, last_seen: u64) -> usize { + let mut left = 0; + let mut right = usage.len(); + + while left < right { + let mid = left + (right - left) / 2; + if usage[mid].index > last_seen { + left = mid + 1; + } else { + right = mid; + } + } + left +} + +fn iterate_month_usage_data(mut data: VecDeque) -> VecDeque { + // one month in hours + let max_hour_iterations: u32 = 730; + let mut client_iter = 0; + let mut res = VecDeque::new(); + while let Some(hour) = data.pop_back() { + // either we hit max iterations or we are on the second to last entry + if client_iter >= max_hour_iterations || data.len() == 1 { + res.push_front(UsageHour { + up: hour.up, + down: hour.down, + price: hour.price, + index: hour.index, + }); + break; + } + client_iter += 1; + } + res +} + #[cfg(test)] mod tests { use std::{fs, io::Error, path::Path}; From 0b7ba634bf14f7633c13927b2fcbdcfda6b2dcfb Mon Sep 17 00:00:00 2001 From: Chiara Seim Date: Thu, 27 Jul 2023 15:47:27 -0700 Subject: [PATCH 2/5] Add unit test for usage tracker to ops updates --- Cargo.lock | 1 + rita_client/Cargo.toml | 1 + rita_client/src/operator_update/mod.rs | 232 +++++++++++++++-------- rita_common/src/payment_validator/mod.rs | 3 +- rita_common/src/usage_tracker/mod.rs | 190 +++++++++---------- 5 files changed, 247 insertions(+), 180 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cadfc2df6..457b4c813 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2724,6 +2724,7 @@ dependencies = [ "num256", "openssh-keys", "phonenumber", + "rand", "rita_common", "serde", "serde_derive", diff --git a/rita_client/Cargo.toml b/rita_client/Cargo.toml index 74d920a55..98102ebf2 100644 --- a/rita_client/Cargo.toml +++ b/rita_client/Cargo.toml @@ -22,6 +22,7 @@ antenna_forwarding_client = { path = "../antenna_forwarding_client" } settings = { path = "../settings" } sha3 = "0.10" lettre = "0.10" +rand = "0.8.0" phonenumber = "0.3" babel_monitor = { path = "../babel_monitor" } arrayvec = {version= "0.7", features = ["serde"]} diff --git a/rita_client/src/operator_update/mod.rs b/rita_client/src/operator_update/mod.rs index 08c883cf4..7cd119ffa 100644 --- a/rita_client/src/operator_update/mod.rs +++ b/rita_client/src/operator_update/mod.rs @@ -172,65 +172,36 @@ pub async fn operator_update(ops_last_seen_usage_hour: Option) -> u64 { // We check that the difference is >1 because we leave a 1 hour buffer to prevent from sending over an incomplete current hour let send_hours = current_hour - last_seen_hour > 1; let mut usage_tracker_data: Option = None; - // if ops_last_seen_usage_hour is a None the thread has restarted and we are waiting for ops to tell us how much - // data we need to send, which will be populated with the next checkin cycle. 730 is the average numbers of hours - // in a month, and we only send 1 month at a time if ops is requesting the full usage history. + // if ops_last_seen_usage_hour is a None the thread has restarted and we are waiting for ops to tell us how much data we need to send, + // which will be populated with the next checkin cycle. we only send 1 month at a time if ops is requesting the full usage history. if send_hours && ops_last_seen_usage_hour.is_some() { let mut usage_data_client = get_usage_data(Client); let mut usage_data_relay = get_usage_data(Relay); - let mut new_client_data: VecDeque = VecDeque::new(); - let mut new_relay_data: VecDeque = VecDeque::new(); - // ops is expecting data as [oldest..newest] but now we sent newest .. 
oldest - let client_oldest = match usage_data_client.back() { - Some(x) => x.index, - None => 0, - }; - let relay_oldest = match usage_data_relay.back() { - Some(x) => x.index, - None => 0, - }; - // if the last seen hour is earlier (lower) than our earliest saved index, we are uploading entire history so don't worry about position since last seen - if last_seen_hour < client_oldest { - new_client_data = iterate_month_usage_data(usage_data_client); - } else if client_oldest == last_seen_hour + 2 { - // we are simply updating the second most recent (because the most recent may be incomplete until it rolls over) - if let Some(data) = usage_data_client.get(1) { - if data.index == last_seen_hour + 1 { - new_client_data.push_front(UsageHour { - up: data.up, - down: data.down, - price: data.price, - index: data.index, - }); - } - } - } else { - // binary search until we find the last seen hour or hour in the vecdeque with index before that (as in cases of gaps in data) - let pos = find_position_since_last_seen(&usage_data_client, last_seen_hour); - // drain all older data as it has been uploaded to ops already - usage_data_client.drain(pos..); - new_client_data = iterate_month_usage_data(usage_data_client); - } - // then repeat for relay data - if last_seen_hour < relay_oldest { - new_relay_data = iterate_month_usage_data(usage_data_relay); - } else if relay_oldest == last_seen_hour + 2 { - if let Some(data) = usage_data_relay.get(1) { - if data.index == last_seen_hour + 1 { - new_relay_data.push_front(UsageHour { - up: data.up, - down: data.down, - price: data.price, - index: data.index, - }); - } - } - } else { - let pos = find_position_since_last_seen(&usage_data_relay, last_seen_hour); - usage_data_relay.drain(pos..); - new_relay_data = iterate_month_usage_data(usage_data_relay); - } + // sort client and relay data in case they have come out of order somehow. This sorts by index increasing so newest data at back + usage_data_relay + .make_contiguous() + .sort_by(|a, b| a.index.cmp(&b.index)); + usage_data_client + .make_contiguous() + .sort_by(|a, b| a.index.cmp(&b.index)); + + // so this spits out the index for where last seen is, or the index of the next highest hour(returned in an error). + // we take the result -1 just in case, limit 0, since it's possible we might get back an index out of bounds at the back. + let client_last_seen_index = + match usage_data_client.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { + Ok(p) => p, + Err(p) => p.saturating_sub(1), + }; + let relay_last_seen_index = + match usage_data_relay.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { + Ok(p) => p, + Err(p) => p.saturating_sub(1), + }; + usage_data_client.drain(0..client_last_seen_index); + usage_data_relay.drain(0..relay_last_seen_index); + let new_client_data = iterate_month_usage_data(usage_data_client); + let new_relay_data = iterate_month_usage_data(usage_data_relay); usage_tracker_data = Some(UsageTracker { last_save_hour: current_hour, @@ -702,38 +673,21 @@ fn contains_forbidden_key(map: Map, forbidden_values: &[&str]) -> false } -/// Binary search the usage data until we get close enough to the last seen(either exact match or the next oldest -/// data). usage data is saved newest at the front, oldest at the back, meaning largest indexes at the front. Returns -/// the index of the position from which to iterate. 
-fn find_position_since_last_seen(usage: &VecDeque, last_seen: u64) -> usize { - let mut left = 0; - let mut right = usage.len(); - - while left < right { - let mid = left + (right - left) / 2; - if usage[mid].index > last_seen { - left = mid + 1; - } else { - right = mid; - } - } - left -} - -fn iterate_month_usage_data(mut data: VecDeque) -> VecDeque { +/// Given a vecdeque of usage hours, add up to a month's worth of hours to a returned vecdeque +pub fn iterate_month_usage_data(mut data: VecDeque) -> VecDeque { // one month in hours let max_hour_iterations: u32 = 730; let mut client_iter = 0; let mut res = VecDeque::new(); - while let Some(hour) = data.pop_back() { - // either we hit max iterations or we are on the second to last entry + while let Some(hour) = data.pop_front() { + // either we hit max iterations or we are on the second to last entry. + res.push_back(UsageHour { + up: hour.up, + down: hour.down, + price: hour.price, + index: hour.index, + }); if client_iter >= max_hour_iterations || data.len() == 1 { - res.push_front(UsageHour { - up: hour.up, - down: hour.down, - price: hour.price, - index: hour.index, - }); break; } client_iter += 1; @@ -743,9 +697,13 @@ fn iterate_month_usage_data(mut data: VecDeque) -> VecDeque x.index, + None => 0, + }; + let last_seen_hour = client_oldest - 10; + // so this spits out the index for where last seen is, or the index of the next highest hour(returned in an error). + // we take the result -1 just in case, limit 0, since it's possible we might get back an index out of bounds at the back. + let last_seen_position = + match usage_data_client.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { + Ok(p) => p, + Err(p) => p.saturating_sub(1), + }; + assert!(usage_data_client.get(last_seen_position).unwrap().index == last_seen_hour); + // now for Case B: we have a gap in the data + usage_data_client.remove(last_seen_position); + let last_seen_position = + match usage_data_client.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { + Ok(p) => p, + Err(p) => p.saturating_sub(1), + }; + // so we must retrieve the next earliest entry from where the last seen would be: + assert!(usage_data_client.get(last_seen_position).unwrap().index == last_seen_hour - 1); + + // now that we have the position of where to start (keep in mind these are sorted vecdeques, and we only need larger + // (later) indexes than the last seen.) we can drain any earlier entries up to the last seen, and send off the result + // to the iterate function + usage_data_client.drain(0..last_seen_position); + let new_client_data = iterate_month_usage_data(usage_data_client); + // finally, check that the returned list to be sent back to ops is sorted as intended: + assert!( + new_client_data.get(0).unwrap().index < new_client_data.get(1).unwrap().index + && new_client_data.get(1).unwrap().index < new_client_data.get(2).unwrap().index + ); + } + + // generates a usage tracker struct for testing without payments since these do not get send up in ops updates. + // using this while I can't get the import working... 
as a note the original function needs to be updated to push to back + // instead of front, as this generates data in the wrong order + fn generate_dummy_usage_tracker_temp() -> RCUsageTracker { + let current_hour = get_current_hour().unwrap(); + RCUsageTracker { + last_save_hour: current_hour, + client_bandwidth: generate_bandwidth(current_hour), + relay_bandwidth: generate_bandwidth(current_hour), + exit_bandwidth: generate_bandwidth(current_hour), + payments: VecDeque::new(), + } + } + #[cfg(test)] + // generates dummy usage hour data randomly + fn generate_bandwidth(starting_hour: u64) -> VecDeque { + use rand::{thread_rng, Rng}; + // 8760 is the max number of saved usage entries(1 year) + let num_to_generate: u16 = thread_rng().gen_range(50..8760); + let mut output = VecDeque::new(); + for i in 0..num_to_generate { + output.push_back(RCUsageHour { + index: starting_hour - i as u64, + up: rand::random(), + down: rand::random(), + price: rand::random(), + }); + } + output + } } diff --git a/rita_common/src/payment_validator/mod.rs b/rita_common/src/payment_validator/mod.rs index 03c623e44..cf2d4bbd5 100644 --- a/rita_common/src/payment_validator/mod.rs +++ b/rita_common/src/payment_validator/mod.rs @@ -904,8 +904,9 @@ mod tests { use cosmos_sdk_proto_althea::cosmos::bank::v1beta1::MsgSend; use deep_space::utils::decode_any; + use crate::usage_tracker::random_identity; + use super::*; - use crate::usage_tracker::tests::random_identity; fn generate_fake_payment() -> ToValidate { let amount: u128 = rand::random(); diff --git a/rita_common/src/usage_tracker/mod.rs b/rita_common/src/usage_tracker/mod.rs index cda7ccff3..5eab89c61 100644 --- a/rita_common/src/usage_tracker/mod.rs +++ b/rita_common/src/usage_tracker/mod.rs @@ -549,20 +549,109 @@ pub fn get_payments_data() -> VecDeque { pub fn save_usage_on_shutdown() { save_usage_to_disk() } +#[cfg(test)] +// generates a nontrivial usage tracker struct for testing +pub fn generate_dummy_usage_tracker() -> UsageTracker { + let current_hour = get_current_hour().unwrap(); + UsageTracker { + last_save_hour: current_hour, + client_bandwidth: generate_bandwidth(current_hour), + relay_bandwidth: generate_bandwidth(current_hour), + exit_bandwidth: generate_bandwidth(current_hour), + payments: generate_payments(current_hour), + } +} +#[cfg(test)] +// generates dummy usage hour data randomly +fn generate_bandwidth(starting_hour: u64) -> VecDeque { + let num_to_generate: u16 = rand::random(); + let mut output = VecDeque::new(); + for i in 0..num_to_generate { + output.push_front(UsageHour { + index: starting_hour - i as u64, + up: rand::random(), + down: rand::random(), + price: rand::random(), + }); + } + output +} +#[cfg(test)] +// generates dummy payment data randomly +fn generate_payments(starting_hour: u64) -> VecDeque { + let mut num_to_generate: u8 = rand::random(); + while (num_to_generate as usize) < MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE { + num_to_generate = rand::random(); + } + let our_id = random_identity(); + let neighbor_ids = get_neighbor_ids(); + let mut output = VecDeque::new(); + for i in 0..num_to_generate { + let num_payments_generate: u8 = rand::random(); + let mut payments = Vec::new(); + for _ in 0..num_payments_generate { + let neighbor_idx: u8 = rand::random(); + let amount: u128 = rand::random(); + let to_us: bool = rand::random(); + let (to, from) = if to_us { + (our_id, neighbor_ids[neighbor_idx as usize]) + } else { + (neighbor_ids[neighbor_idx as usize], our_id) + }; + let txid: u128 = rand::random(); + 
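+            // assemble the formatted tx: the coin flip above picks the direction
+            // (to us or from us), and the random u128 txid is stringified here
+            // because FormattedPaymentTx carries its txid as a string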
payments.push(FormattedPaymentTx { + to, + from, + amount: amount.into(), + txid: txid.to_string(), + }) + } + output.push_front(PaymentHour { + index: starting_hour - i as u64, + payments, + }); + } + output +} +#[cfg(test)] +// gets a list of pregenerated neighbor id +fn get_neighbor_ids() -> Vec { + let mut id = Vec::new(); + for _ in 0..256 { + id.push(random_identity()); + } + id +} +#[cfg(test)] +/// generates a random identity, never use in production, your money will be stolen +pub fn random_identity() -> Identity { + use clarity::PrivateKey; + + let secret: [u8; 32] = rand::random(); + let mut ip: [u8; 16] = [0; 16]; + ip.copy_from_slice(&secret[0..16]); + + // the starting location of the funds + let eth_key = PrivateKey::from_bytes(secret).unwrap(); + let eth_address = eth_key.to_address(); + + Identity { + mesh_ip: ip.into(), + eth_address, + wg_public_key: secret.into(), + nickname: None, + } +} #[cfg(test)] pub mod tests { - use super::MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE; - use super::{get_current_hour, FormattedPaymentTx, PaymentHour, UsageHour, UsageTracker}; - use crate::usage_tracker::{self, IOError}; - use althea_types::Identity; - use clarity::PrivateKey; + use super::UsageTracker; + use crate::usage_tracker::{self, generate_dummy_usage_tracker, IOError}; use flate2::write::ZlibEncoder; use flate2::Compression; use settings::client::RitaClientSettings; use settings::{get_rita_common, set_rita_client, set_rita_common}; - use std::collections::VecDeque; use std::fs::File; use std::io::Write; impl UsageTracker { @@ -638,93 +727,4 @@ pub mod tests { assert!(dummy_usage_tracker == res2); assert!(res2 == res4); } - - // generates a nontrivial usage tracker struct for testing - fn generate_dummy_usage_tracker() -> UsageTracker { - let current_hour = get_current_hour().unwrap(); - UsageTracker { - last_save_hour: current_hour, - client_bandwidth: generate_bandwidth(current_hour), - relay_bandwidth: generate_bandwidth(current_hour), - exit_bandwidth: generate_bandwidth(current_hour), - payments: generate_payments(current_hour), - } - } - // generates dummy usage hour data randomly - fn generate_bandwidth(starting_hour: u64) -> VecDeque { - let num_to_generate: u16 = rand::random(); - let mut output = VecDeque::new(); - for i in 0..num_to_generate { - output.push_front(UsageHour { - index: starting_hour - i as u64, - up: rand::random(), - down: rand::random(), - price: rand::random(), - }); - } - output - } - // generates dummy payment data randomly - fn generate_payments(starting_hour: u64) -> VecDeque { - let mut num_to_generate: u8 = rand::random(); - while (num_to_generate as usize) < MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE { - num_to_generate = rand::random(); - } - let our_id = random_identity(); - let neighbor_ids = get_neighbor_ids(); - let mut output = VecDeque::new(); - for i in 0..num_to_generate { - let num_payments_generate: u8 = rand::random(); - let mut payments = Vec::new(); - for _ in 0..num_payments_generate { - let neighbor_idx: u8 = rand::random(); - let amount: u128 = rand::random(); - let to_us: bool = rand::random(); - let (to, from) = if to_us { - (our_id, neighbor_ids[neighbor_idx as usize]) - } else { - (neighbor_ids[neighbor_idx as usize], our_id) - }; - let txid: u128 = rand::random(); - payments.push(FormattedPaymentTx { - to, - from, - amount: amount.into(), - txid: txid.to_string(), - }) - } - output.push_front(PaymentHour { - index: starting_hour - i as u64, - payments, - }); - } - output - } - - // gets a list of pregenerated 
neighbor id - fn get_neighbor_ids() -> Vec { - let mut id = Vec::new(); - for _ in 0..256 { - id.push(random_identity()); - } - id - } - - /// generates a random identity, never use in production, your money will be stolen - pub fn random_identity() -> Identity { - let secret: [u8; 32] = rand::random(); - let mut ip: [u8; 16] = [0; 16]; - ip.copy_from_slice(&secret[0..16]); - - // the starting location of the funds - let eth_key = PrivateKey::from_bytes(secret).unwrap(); - let eth_address = eth_key.to_address(); - - Identity { - mesh_ip: ip.into(), - eth_address, - wg_public_key: secret.into(), - nickname: None, - } - } } From e4fc5c62ff2dba856d3a066f35d16c6239b51fe5 Mon Sep 17 00:00:00 2001 From: Chiara Seim Date: Fri, 28 Jul 2023 01:41:34 -0700 Subject: [PATCH 3/5] Move usage tracker, operator update tests to their own files --- rita_client/src/operator_update/mod.rs | 322 ++++------------------- rita_client/src/operator_update/tests.rs | 260 ++++++++++++++++++ rita_common/src/payment_validator/mod.rs | 2 +- rita_common/src/usage_tracker/mod.rs | 183 +------------ rita_common/src/usage_tracker/tests.rs | 178 +++++++++++++ 5 files changed, 491 insertions(+), 454 deletions(-) create mode 100644 rita_client/src/operator_update/tests.rs create mode 100644 rita_common/src/usage_tracker/tests.rs diff --git a/rita_client/src/operator_update/mod.rs b/rita_client/src/operator_update/mod.rs index 7cd119ffa..595caec2d 100644 --- a/rita_client/src/operator_update/mod.rs +++ b/rita_client/src/operator_update/mod.rs @@ -1,6 +1,7 @@ //! This module is responsible for checking in with the operator server and getting updated local settings pub mod update_loop; pub mod updater; +pub mod tests; extern crate openssh_keys; use crate::dashboard::system_chain::set_system_blockchain; use crate::exit_manager::{get_client_pub_ipv6, get_selected_exit_ip}; @@ -172,42 +173,17 @@ pub async fn operator_update(ops_last_seen_usage_hour: Option) -> u64 { // We check that the difference is >1 because we leave a 1 hour buffer to prevent from sending over an incomplete current hour let send_hours = current_hour - last_seen_hour > 1; let mut usage_tracker_data: Option = None; - // if ops_last_seen_usage_hour is a None the thread has restarted and we are waiting for ops to tell us how much data we need to send, + // if ops_last_seen_usage_hour is a None the thread has restarted and we are waiting for ops to tell us how much data we need to send, // which will be populated with the next checkin cycle. we only send 1 month at a time if ops is requesting the full usage history. if send_hours && ops_last_seen_usage_hour.is_some() { - let mut usage_data_client = get_usage_data(Client); - let mut usage_data_relay = get_usage_data(Relay); - - // sort client and relay data in case they have come out of order somehow. This sorts by index increasing so newest data at back - usage_data_relay - .make_contiguous() - .sort_by(|a, b| a.index.cmp(&b.index)); - usage_data_client - .make_contiguous() - .sort_by(|a, b| a.index.cmp(&b.index)); - - // so this spits out the index for where last seen is, or the index of the next highest hour(returned in an error). - // we take the result -1 just in case, limit 0, since it's possible we might get back an index out of bounds at the back. 
- let client_last_seen_index = - match usage_data_client.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { - Ok(p) => p, - Err(p) => p.saturating_sub(1), - }; - let relay_last_seen_index = - match usage_data_relay.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { - Ok(p) => p, - Err(p) => p.saturating_sub(1), - }; - usage_data_client.drain(0..client_last_seen_index); - usage_data_relay.drain(0..relay_last_seen_index); - let new_client_data = iterate_month_usage_data(usage_data_client); - let new_relay_data = iterate_month_usage_data(usage_data_relay); - - usage_tracker_data = Some(UsageTracker { - last_save_hour: current_hour, - client_bandwidth: new_client_data, - relay_bandwidth: new_relay_data, - }); + let usage_data_client = get_usage_data(Client); + let usage_data_relay = get_usage_data(Relay); + usage_tracker_data = process_usage_data( + usage_data_client, + usage_data_relay, + last_seen_hour, + current_hour, + ) } let exit_con = Some(ExitConnection { @@ -695,244 +671,44 @@ pub fn iterate_month_usage_data(mut data: VecDeque) -> VecDeque &str { - let test_file = std::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(file_name); - let operator_key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL+UBakquB9rJ7tA2H+U43H/xNmpJiHpOkHGpVfFUXgP OPERATOR"; - writeln!(test_file.unwrap(), "{operator_key}").expect("setup failed to create temp file"); - operator_key - } - fn remove_temp_file(file_name: &str) -> Result<(), Error> { - fs::remove_file(file_name) - } - fn parse_keys(file_name: &str) -> Vec { - let mut temp = Vec::new(); - let expected = File::open(file_name).unwrap(); - let reader = BufReader::new(expected); - for key in reader.lines() { - temp.push(key.unwrap()); - } - temp - } - - #[test] - fn test_update_auth_keys() { - let added_keys = vec![String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net")]; - let removed_keys = vec![]; - let key_file: &str = "authorized_keys"; - let operator_key = touch_temp_file(key_file); - - let _update = update_authorized_keys(added_keys.clone(), removed_keys, key_file); - let result = parse_keys(key_file); - assert_eq!(result.len(), 2); - assert!(result.contains(&added_keys[0])); - assert!(result.contains(&operator_key.to_string())); - remove_temp_file(key_file).unwrap(); - } - - #[test] - fn test_update_auth_multiple_keys() { - let added_keys = vec![String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net"), - String::from("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVF1POOko4/fTE/SowsURSmd+kAUFDX6VPNqICJjn8eQk8FZ15WsZKfBdrGXLhl2+pxM66VWMUVRQOq84iSRVSVPA3abz0H7JYIGzO8psTweSZfK1jwHfKDGQA1h1aPuspnPrX7dyS1qLZf3YeVUUi+BFsW2gSiMadbS4zal2c2F1AG5Ezr3zcRVA8y3D0bZxScPAEX74AeTFcimHpHFyzDtUsRpf0uSEXZcMFqX5j4ETKlIs28k1v8LlhHo91IQYHEtbyi/I1M0axbF4VCz5JlcbAs9LUEJg8Kx8LxzJSeSJbxVwyk5WiEDwVsCL2MAtaOcJ+/FhxLb0ZEELAHnXFNSqmY8QoHeSdHrGP7FmVCBjRb/AhVUHYvsG94rO3Ij4H5XsbsQbP3AHVKbvf387WB53Wga7VrBXvRC9aDisetdP9+4/seVIBbOIePotaiHoTyS1cJ+Jg0PkKy96enqwMt9T1Wt8jURB+s/A/bDGHkjB3dxomuGxux8dD6UNX54M= test-rsa@hawk-net"), - ]; - let removed_keys = vec![]; - let key_file: &str = "add_keys"; - - let operator_key = touch_temp_file(key_file); - - let _update = update_authorized_keys(added_keys.clone(), removed_keys, key_file); - let result = parse_keys(key_file); - assert!(result.contains(&added_keys[0])); - assert!(result.contains(&added_keys[1])); - assert!(result.contains(&operator_key.to_string())); - assert_eq!(result.len(), 3); - 
remove_temp_file(key_file).unwrap(); - } - - #[test] - fn test_update_auth_remove_keys() { - let added_keys = vec![]; - let removed_keys = vec![ - String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net"), - String::from("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVF1POOko4/fTE/SowsURSmd+kAUFDX6VPNqICJjn8eQk8FZ15WsZKfBdrGXLhl2+pxM66VWMUVRQOq84iSRVSVPA3abz0H7JYIGzO8psTweSZfK1jwHfKDGQA1h1aPuspnPrX7dyS1qLZf3YeVUUi+BFsW2gSiMadbS4zal2c2F1AG5Ezr3zcRVA8y3D0bZxScPAEX74AeTFcimHpHFyzDtUsRpf0uSEXZcMFqX5j4ETKlIs28k1v8LlhHo91IQYHEtbyi/I1M0axbF4VCz5JlcbAs9LUEJg8Kx8LxzJSeSJbxVwyk5WiEDwVsCL2MAtaOcJ+/FhxLb0ZEELAHnXFNSqmY8QoHeSdHrGP7FmVCBjRb/AhVUHYvsG94rO3Ij4H5XsbsQbP3AHVKbvf387WB53Wga7VrBXvRC9aDisetdP9+4/seVIBbOIePotaiHoTyS1cJ+Jg0PkKy96enqwMt9T1Wt8jURB+s/A/bDGHkjB3dxomuGxux8dD6UNX54M= test-rsa@hawk-net"), - ]; - let key_file: &str = "auth_remove_keys"; - - let operator_key = touch_temp_file(key_file); - - let _update = update_authorized_keys(added_keys, removed_keys, key_file); - let result = parse_keys(key_file); - assert!(result.contains(&operator_key.to_string())); - - assert_eq!(result.len(), 1); - - remove_temp_file(key_file).unwrap(); - } - #[test] - fn test_removing_existing_key() { - let added_keys = vec![]; - let key_file: &str = "remove_keys"; - - let operator_key = touch_temp_file(key_file); - let removed_keys = vec![String::from(operator_key)]; - let _update = update_authorized_keys(added_keys, removed_keys.clone(), key_file); - - let result = parse_keys(key_file); - for item in result { - assert_eq!(item, removed_keys[0].to_string()); - } - - remove_temp_file(key_file).unwrap(); - } - #[test] - fn test_authorized_keys_create_if_missing() { - let added_keys = vec![ - String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVF1POOko4/fTE/SowsURSmd+kAUFDX6VPNqICJjn8eQk8FZ15WsZKfBdrGXLhl2+pxM66VWMUVRQOq84iSRVSVPA3abz0H7JYIGzO8psTweSZfK1jwHfKDGQA1h1aPuspnPrX7dyS1qLZf3YeVUUi+BFsW2gSiMadbS4zal2c2F1AG5Ezr3zcRVA8y3D0bZxScPAEX74AeTFcimHpHFyzDtUsRpf0uSEXZcMFqX5j4ETKlIs28k1v8LlhHo91IQYHEtbyi/I1M0axbF4VCz5JlcbAs9LUEJg8Kx8LxzJSeSJbxVwyk5WiEDwVsCL2MAtaOcJ+/FhxLb0ZEELAHnXFNSqmY8QoHeSdHrGP7FmVCBjRb/AhVUHYvsG94rO3Ij4H5XsbsQbP3AHVKbvf387WB53Wga7VrBXvRC9aDisetdP9+4/seVIBbOIePotaiHoTyS1cJ+Jg0PkKy96enqwMt9T1Wt8jURB+s/A/bDGHkjB3dxomuGxux8dD6UNX54M= test-rsa@hawk-net"), - ]; - let removed_keys: Vec = vec![]; - let key_file: &str = "create_keys_file"; - let _update = update_authorized_keys(added_keys, removed_keys, key_file); - assert!(Path::new(key_file).exists()); - } - #[test] - fn test_usage_data_processing() { - // this tests the flow used in rita client's operator update loop used to process usage data sent up to ops - let dummy_usage_tracker = generate_dummy_usage_tracker_temp(); - let mut usage_data_client = dummy_usage_tracker.client_bandwidth.clone(); - // Test the sort function first: - // shuffle the data because it's currently ordered - usage_data_client - .make_contiguous() - .shuffle(&mut rand::thread_rng()); - println!( - "Sample of current shuffle is {} {} {}", - usage_data_client.get(0).unwrap().index, - usage_data_client.get(1).unwrap().index, - usage_data_client.get(2).unwrap().index - ); - // Sort by index increasing so newest data at back. Note that usage hours are stored to disk as - // the opposite order where newest are added to the front, so this is inefficient. 
- // Options here to optimize are either a/write my own binary sort again which will compare for the existing structure - // where the saved vecdeque is highest index to lowest index, b/rework usage tracker so that we save data lowest index - // to highest index, or c/the current solution(inefficient, as we will be fully reversing the whole vecdeque of each - // client and relay every checkin): sort the entire list in reverse order to use with the builtin bin search from vecdeque - - // this here sorts from lowest index to highest index, so we end with a vecdeque that we can use binary search on - usage_data_client - .make_contiguous() - .sort_by(|a, b| a.index.cmp(&b.index)); - println!( - "Sample of sorted list is {} {} {}", - usage_data_client.get(0).unwrap().index, - usage_data_client.get(1).unwrap().index, - usage_data_client.get(2).unwrap().index - ); - assert!( - usage_data_client.get(0).unwrap().index < usage_data_client.get(1).unwrap().index - && usage_data_client.get(1).unwrap().index - < usage_data_client.get(2).unwrap().index - ); - // Next test the binary search - // Case A: no gaps in data when searching through for the last seen hour - // for the purposes of this test we will look at the 10th entry in the list - let client_oldest = match dummy_usage_tracker.client_bandwidth.front() { - Some(x) => x.index, - None => 0, +/// Given our saved usage data and our last seen value, process the vecdeques so that we only +/// send to ops new data since last seen. +pub fn process_usage_data( + mut usage_data_client: VecDeque, + mut usage_data_relay: VecDeque, + last_seen_hour: u64, + current_hour: u64, +) -> Option { + // sort client and relay data in case they have come out of order somehow. This sorts by index increasing so newest data at back + usage_data_relay + .make_contiguous() + .sort_by(|a, b| a.index.cmp(&b.index)); + usage_data_client + .make_contiguous() + .sort_by(|a, b| a.index.cmp(&b.index)); + + // so this spits out the index for where last seen is, or the index of the next highest hour(returned in an error). + // we take the result -1 just in case, limit 0, since it's possible we might get back an index out of bounds at the back. + let client_last_seen_index = + match usage_data_client.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { + Ok(p) => p, + Err(p) => p.saturating_sub(1), }; - let last_seen_hour = client_oldest - 10; - // so this spits out the index for where last seen is, or the index of the next highest hour(returned in an error). - // we take the result -1 just in case, limit 0, since it's possible we might get back an index out of bounds at the back. - let last_seen_position = - match usage_data_client.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { - Ok(p) => p, - Err(p) => p.saturating_sub(1), - }; - assert!(usage_data_client.get(last_seen_position).unwrap().index == last_seen_hour); - // now for Case B: we have a gap in the data - usage_data_client.remove(last_seen_position); - let last_seen_position = - match usage_data_client.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { - Ok(p) => p, - Err(p) => p.saturating_sub(1), - }; - // so we must retrieve the next earliest entry from where the last seen would be: - assert!(usage_data_client.get(last_seen_position).unwrap().index == last_seen_hour - 1); - - // now that we have the position of where to start (keep in mind these are sorted vecdeques, and we only need larger - // (later) indexes than the last seen.) 
we can drain any earlier entries up to the last seen, and send off the result - // to the iterate function - usage_data_client.drain(0..last_seen_position); - let new_client_data = iterate_month_usage_data(usage_data_client); - // finally, check that the returned list to be sent back to ops is sorted as intended: - assert!( - new_client_data.get(0).unwrap().index < new_client_data.get(1).unwrap().index - && new_client_data.get(1).unwrap().index < new_client_data.get(2).unwrap().index - ); - } - - // generates a usage tracker struct for testing without payments since these do not get send up in ops updates. - // using this while I can't get the import working... as a note the original function needs to be updated to push to back - // instead of front, as this generates data in the wrong order - fn generate_dummy_usage_tracker_temp() -> RCUsageTracker { - let current_hour = get_current_hour().unwrap(); - RCUsageTracker { - last_save_hour: current_hour, - client_bandwidth: generate_bandwidth(current_hour), - relay_bandwidth: generate_bandwidth(current_hour), - exit_bandwidth: generate_bandwidth(current_hour), - payments: VecDeque::new(), - } - } - #[cfg(test)] - // generates dummy usage hour data randomly - fn generate_bandwidth(starting_hour: u64) -> VecDeque { - use rand::{thread_rng, Rng}; - // 8760 is the max number of saved usage entries(1 year) - let num_to_generate: u16 = thread_rng().gen_range(50..8760); - let mut output = VecDeque::new(); - for i in 0..num_to_generate { - output.push_back(RCUsageHour { - index: starting_hour - i as u64, - up: rand::random(), - down: rand::random(), - price: rand::random(), - }); - } - output - } + let relay_last_seen_index = + match usage_data_relay.binary_search_by(|x| x.index.cmp(&last_seen_hour)) { + Ok(p) => p, + Err(p) => p.saturating_sub(1), + }; + // remove all data before the last seen index + usage_data_client.drain(0..client_last_seen_index); + usage_data_relay.drain(0..relay_last_seen_index); + let new_client_data = iterate_month_usage_data(usage_data_client); + let new_relay_data = iterate_month_usage_data(usage_data_relay); + + Some(UsageTracker { + last_save_hour: current_hour, + client_bandwidth: new_client_data, + relay_bandwidth: new_relay_data, + }) } + diff --git a/rita_client/src/operator_update/tests.rs b/rita_client/src/operator_update/tests.rs new file mode 100644 index 000000000..dbf11ea13 --- /dev/null +++ b/rita_client/src/operator_update/tests.rs @@ -0,0 +1,260 @@ +#[cfg(test)] +mod test { + use rand::seq::SliceRandom; + // TODO: Why is this import broken? 
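+    // (most likely because generate_dummy_usage_tracker sits behind #[cfg(test)] in
+    // rita_common: cfg(test) items are only compiled when that crate itself is under
+    // test, so they are never visible to dependent crates such as rita_client, even
+    // in rita_client's own test builds)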
+ //use rita_common::usage_tracker::generate_dummy_usage_tracker; + use rita_common::usage_tracker::get_current_hour; + use rita_common::usage_tracker::UsageHour as RCUsageHour; + use rita_common::usage_tracker::UsageTracker as RCUsageTracker; + use serde_json::json; + use serde_json::Value; + use std::collections::VecDeque; + use std::fs::File; + use std::io::{BufRead, BufReader, Write}; + use std::{fs, io::Error, path::Path}; + + use crate::operator_update::contains_forbidden_key; + use crate::operator_update::process_usage_data; + use crate::operator_update::update_authorized_keys; + + const FORBIDDEN_MERGE_VALUES: [&str; 2] = ["test_key", "other_test_key"]; + + #[test] + fn test_contains_key() { + // exact key match should fail + let object = json!({"localization": { "wyre_enabled": true, "wyre_account_id": "test_key", "test_key": false}}); + if let Value::Object(map) = object { + assert!(contains_forbidden_key(map, &FORBIDDEN_MERGE_VALUES)); + } else { + panic!("Not a json map!"); + } + + // slightly modified key should not match + let object = json!({"localization": { "wyre_enabled": true, "wyre_account_id": "test_key", "test_key1": false}}); + if let Value::Object(map) = object { + assert!(!contains_forbidden_key(map, &FORBIDDEN_MERGE_VALUES)); + } else { + panic!("Not a json map!"); + } + } + fn touch_temp_file(file_name: &str) -> &str { + let test_file = std::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(file_name); + let operator_key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL+UBakquB9rJ7tA2H+U43H/xNmpJiHpOkHGpVfFUXgP OPERATOR"; + writeln!(test_file.unwrap(), "{operator_key}").expect("setup failed to create temp file"); + operator_key + } + fn remove_temp_file(file_name: &str) -> Result<(), Error> { + fs::remove_file(file_name) + } + fn parse_keys(file_name: &str) -> Vec { + let mut temp = Vec::new(); + let expected = File::open(file_name).unwrap(); + let reader = BufReader::new(expected); + for key in reader.lines() { + temp.push(key.unwrap()); + } + temp + } + + #[test] + fn test_update_auth_keys() { + let added_keys = vec![String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net")]; + let removed_keys = vec![]; + let key_file: &str = "authorized_keys"; + let operator_key = touch_temp_file(key_file); + + let _update = update_authorized_keys(added_keys.clone(), removed_keys, key_file); + let result = parse_keys(key_file); + assert_eq!(result.len(), 2); + assert!(result.contains(&added_keys[0])); + assert!(result.contains(&operator_key.to_string())); + remove_temp_file(key_file).unwrap(); + } + + #[test] + fn test_update_auth_multiple_keys() { + let added_keys = vec![String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net"), + String::from("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVF1POOko4/fTE/SowsURSmd+kAUFDX6VPNqICJjn8eQk8FZ15WsZKfBdrGXLhl2+pxM66VWMUVRQOq84iSRVSVPA3abz0H7JYIGzO8psTweSZfK1jwHfKDGQA1h1aPuspnPrX7dyS1qLZf3YeVUUi+BFsW2gSiMadbS4zal2c2F1AG5Ezr3zcRVA8y3D0bZxScPAEX74AeTFcimHpHFyzDtUsRpf0uSEXZcMFqX5j4ETKlIs28k1v8LlhHo91IQYHEtbyi/I1M0axbF4VCz5JlcbAs9LUEJg8Kx8LxzJSeSJbxVwyk5WiEDwVsCL2MAtaOcJ+/FhxLb0ZEELAHnXFNSqmY8QoHeSdHrGP7FmVCBjRb/AhVUHYvsG94rO3Ij4H5XsbsQbP3AHVKbvf387WB53Wga7VrBXvRC9aDisetdP9+4/seVIBbOIePotaiHoTyS1cJ+Jg0PkKy96enqwMt9T1Wt8jURB+s/A/bDGHkjB3dxomuGxux8dD6UNX54M= test-rsa@hawk-net"), + ]; + let removed_keys = vec![]; + let key_file: &str = "add_keys"; + + let operator_key = touch_temp_file(key_file); + + let _update = 
update_authorized_keys(added_keys.clone(), removed_keys, key_file); + let result = parse_keys(key_file); + assert!(result.contains(&added_keys[0])); + assert!(result.contains(&added_keys[1])); + assert!(result.contains(&operator_key.to_string())); + assert_eq!(result.len(), 3); + remove_temp_file(key_file).unwrap(); + } + + #[test] + fn test_update_auth_remove_keys() { + let added_keys = vec![]; + let removed_keys = vec![ + String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net"), + String::from("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVF1POOko4/fTE/SowsURSmd+kAUFDX6VPNqICJjn8eQk8FZ15WsZKfBdrGXLhl2+pxM66VWMUVRQOq84iSRVSVPA3abz0H7JYIGzO8psTweSZfK1jwHfKDGQA1h1aPuspnPrX7dyS1qLZf3YeVUUi+BFsW2gSiMadbS4zal2c2F1AG5Ezr3zcRVA8y3D0bZxScPAEX74AeTFcimHpHFyzDtUsRpf0uSEXZcMFqX5j4ETKlIs28k1v8LlhHo91IQYHEtbyi/I1M0axbF4VCz5JlcbAs9LUEJg8Kx8LxzJSeSJbxVwyk5WiEDwVsCL2MAtaOcJ+/FhxLb0ZEELAHnXFNSqmY8QoHeSdHrGP7FmVCBjRb/AhVUHYvsG94rO3Ij4H5XsbsQbP3AHVKbvf387WB53Wga7VrBXvRC9aDisetdP9+4/seVIBbOIePotaiHoTyS1cJ+Jg0PkKy96enqwMt9T1Wt8jURB+s/A/bDGHkjB3dxomuGxux8dD6UNX54M= test-rsa@hawk-net"), + ]; + let key_file: &str = "auth_remove_keys"; + + let operator_key = touch_temp_file(key_file); + + let _update = update_authorized_keys(added_keys, removed_keys, key_file); + let result = parse_keys(key_file); + assert!(result.contains(&operator_key.to_string())); + + assert_eq!(result.len(), 1); + + remove_temp_file(key_file).unwrap(); + } + #[test] + fn test_removing_existing_key() { + let added_keys = vec![]; + let key_file: &str = "remove_keys"; + + let operator_key = touch_temp_file(key_file); + let removed_keys = vec![String::from(operator_key)]; + let _update = update_authorized_keys(added_keys, removed_keys.clone(), key_file); + + let result = parse_keys(key_file); + for item in result { + assert_eq!(item, removed_keys[0].to_string()); + } + + remove_temp_file(key_file).unwrap(); + } + #[test] + fn test_authorized_keys_create_if_missing() { + let added_keys = vec![ + String::from("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFgFrnSm9MFS1zpHHvwtfLohjqtsK13NyL41g/zyIhK test@hawk-net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVF1POOko4/fTE/SowsURSmd+kAUFDX6VPNqICJjn8eQk8FZ15WsZKfBdrGXLhl2+pxM66VWMUVRQOq84iSRVSVPA3abz0H7JYIGzO8psTweSZfK1jwHfKDGQA1h1aPuspnPrX7dyS1qLZf3YeVUUi+BFsW2gSiMadbS4zal2c2F1AG5Ezr3zcRVA8y3D0bZxScPAEX74AeTFcimHpHFyzDtUsRpf0uSEXZcMFqX5j4ETKlIs28k1v8LlhHo91IQYHEtbyi/I1M0axbF4VCz5JlcbAs9LUEJg8Kx8LxzJSeSJbxVwyk5WiEDwVsCL2MAtaOcJ+/FhxLb0ZEELAHnXFNSqmY8QoHeSdHrGP7FmVCBjRb/AhVUHYvsG94rO3Ij4H5XsbsQbP3AHVKbvf387WB53Wga7VrBXvRC9aDisetdP9+4/seVIBbOIePotaiHoTyS1cJ+Jg0PkKy96enqwMt9T1Wt8jURB+s/A/bDGHkjB3dxomuGxux8dD6UNX54M= test-rsa@hawk-net"), + ]; + let removed_keys: Vec = vec![]; + let key_file: &str = "create_keys_file"; + let _update = update_authorized_keys(added_keys, removed_keys, key_file); + assert!(Path::new(key_file).exists()); + } + #[test] + fn test_usage_data_processing() { + // this tests the flow used in rita client's operator update loop used to process usage data sent up to ops + let dummy_usage_tracker = generate_dummy_usage_tracker_temp(); + let mut usage_data_client = dummy_usage_tracker.client_bandwidth.clone(); + let mut usage_data_relay = dummy_usage_tracker.relay_bandwidth; + let mut unshuffled_client = usage_data_client.clone(); + let mut unshuffled_relay = usage_data_relay.clone(); + + // Test the sort function first: + // shuffle the data because it's currently ordered + usage_data_client + .make_contiguous() + .shuffle(&mut rand::thread_rng()); + 
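+        // print a few sample indexes so a failing run shows how the shuffle actually
+        // perturbed the order before process_usage_data re-sorts it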
println!( + "Sample of current shuffle is {} {} {}", + usage_data_client.get(0).unwrap().index, + usage_data_client.get(1).unwrap().index, + usage_data_client.get(2).unwrap().index + ); + usage_data_relay + .make_contiguous() + .shuffle(&mut rand::thread_rng()); + // The processing function sorts these lowest index to highest. Note that usage hours are stored to disk as + // the opposite order where newest are added to the front, so this is inefficient. + // Options here to optimize are either a/write my own binary sort again which will compare for the existing structure + // where the saved vecdeque is highest index to lowest index, b/rework usage tracker so that we save data lowest index + // to highest index, or c/the current solution(inefficient, as we will be fully reversing the whole vecdeque of each + // client and relay at least once per hour on rollover): sort the entire list in reverse order to use with the builtin + // bin search from vecdeque + + // for this purpose our last seen will be start hour - 10. + let current_hour = get_current_hour().unwrap(); + let last_seen_hour = current_hour - 10; + let res_usage = process_usage_data( + usage_data_client.clone(), + usage_data_relay.clone(), + last_seen_hour, + current_hour, + ) + .unwrap(); + let res_usage_client = res_usage.client_bandwidth; + let res_usage_relay = res_usage.relay_bandwidth; + + // check that the sorting in process_usage_data is correct after shuffling and sending it through + assert!( + res_usage_relay.get(0).unwrap().index < res_usage_relay.get(1).unwrap().index + && res_usage_relay.get(1).unwrap().index < res_usage_relay.get(2).unwrap().index + ); + assert!( + res_usage_client.get(0).unwrap().index < res_usage_client.get(1).unwrap().index + && res_usage_client.get(1).unwrap().index < res_usage_client.get(2).unwrap().index + ); + // check that the binary searching is correct: we did not remove any entries from usage client; so we should have started exactly + // from the last seen hour. we removed the last seen from usage relay, so we should expect to see our earliest hour as one fewer + assert!(res_usage_client.get(0).unwrap().index == last_seen_hour); + assert!(res_usage_relay.get(0).unwrap().index == last_seen_hour); + + // now check that same thing, but in case we have a gap in the data. we'll remove the entry for the last_seen_hour from usage_data_relay + // to make sure we are successfully returning the next earliest hour (in our case, the last seen -1). we use res_usage client and relay + // because to remove the correct entry it needs to be presorted (if we've gotten here it's guaranteed.) + // we successfully search for the entry or return the next one down. + unshuffled_client.remove(unshuffled_client.len() - 11); + unshuffled_relay.remove(unshuffled_relay.len() - 11); + // so the index of our last seen hour if we say last seen is current - 10... will be at len - 11. + let res_usage = process_usage_data( + unshuffled_client, + unshuffled_relay, + last_seen_hour, + current_hour, + ) + .unwrap(); + let res_usage_client = res_usage.client_bandwidth; + let res_usage_relay = res_usage.relay_bandwidth; + // after processing we should start at last seen - 1. 
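+        // (the exact last_seen entry was removed above to simulate the gap, so the
+        // binary search returns Err(insertion_point) and the saturating_sub(1) steps
+        // back to the next-oldest saved hour, last_seen_hour - 1, which survives the
+        // drain as the first entry to send)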
+ println!( + "{:?} last seen {:?}", + res_usage_relay.get(0).unwrap().index, + last_seen_hour + ); + assert!(res_usage_relay.get(0).unwrap().index == last_seen_hour - 1); + assert!(res_usage_client.get(0).unwrap().index == last_seen_hour - 1); + + // check that our iteration function does indeed stop at a month of data: + assert!(res_usage_client.len() <= 730); + assert!(res_usage_relay.len() <= 730); + } + + // generates a usage tracker struct for testing without payments since these do not get sent up in ops updates. + // using this while I can't get the import working... as a note the original function needs to be updated to push to back + // instead of front, as this generates data in the wrong order + fn generate_dummy_usage_tracker_temp() -> RCUsageTracker { + let current_hour = get_current_hour().unwrap(); + RCUsageTracker { + last_save_hour: current_hour, + client_bandwidth: generate_bandwidth(current_hour), + relay_bandwidth: generate_bandwidth(current_hour), + exit_bandwidth: VecDeque::new(), + payments: VecDeque::new(), + } + } + #[cfg(test)] + // generates dummy usage hour data randomly + fn generate_bandwidth(starting_hour: u64) -> VecDeque { + use rand::{thread_rng, Rng}; + // 8760 is the max number of saved usage entries(1 year) + let num_to_generate: u16 = thread_rng().gen_range(50..8760); + let mut output = VecDeque::new(); + for i in 0..num_to_generate { + output.push_front(RCUsageHour { + index: starting_hour - i as u64, + up: rand::random(), + down: rand::random(), + price: rand::random(), + }); + } + output + } +} diff --git a/rita_common/src/payment_validator/mod.rs b/rita_common/src/payment_validator/mod.rs index cf2d4bbd5..6171fbd69 100644 --- a/rita_common/src/payment_validator/mod.rs +++ b/rita_common/src/payment_validator/mod.rs @@ -904,7 +904,7 @@ mod tests { use cosmos_sdk_proto_althea::cosmos::bank::v1beta1::MsgSend; use deep_space::utils::decode_any; - use crate::usage_tracker::random_identity; + use crate::usage_tracker::tests::test::random_identity; use super::*; diff --git a/rita_common/src/usage_tracker/mod.rs b/rita_common/src/usage_tracker/mod.rs index 5eab89c61..5353a5458 100644 --- a/rita_common/src/usage_tracker/mod.rs +++ b/rita_common/src/usage_tracker/mod.rs @@ -32,6 +32,8 @@ use std::time::SystemTime; use std::time::UNIX_EPOCH; use std::usize; +pub mod tests; + /// one year worth of usage storage const MAX_USAGE_ENTRIES: usize = 8_760; /// The number of tx's we store in our history to show @@ -548,183 +550,4 @@ pub fn get_payments_data() -> VecDeque { /// a reboot or restart only, most common form of shutdown is power being pulled pub fn save_usage_on_shutdown() { save_usage_to_disk() -} -#[cfg(test)] -// generates a nontrivial usage tracker struct for testing -pub fn generate_dummy_usage_tracker() -> UsageTracker { - let current_hour = get_current_hour().unwrap(); - UsageTracker { - last_save_hour: current_hour, - client_bandwidth: generate_bandwidth(current_hour), - relay_bandwidth: generate_bandwidth(current_hour), - exit_bandwidth: generate_bandwidth(current_hour), - payments: generate_payments(current_hour), - } -} -#[cfg(test)] -// generates dummy usage hour data randomly -fn generate_bandwidth(starting_hour: u64) -> VecDeque { - let num_to_generate: u16 = rand::random(); - let mut output = VecDeque::new(); - for i in 0..num_to_generate { - output.push_front(UsageHour { - index: starting_hour - i as u64, - up: rand::random(), - down: rand::random(), - price: rand::random(), - }); - } - output -} -#[cfg(test)] -// generates dummy payment 
data randomly -fn generate_payments(starting_hour: u64) -> VecDeque { - let mut num_to_generate: u8 = rand::random(); - while (num_to_generate as usize) < MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE { - num_to_generate = rand::random(); - } - let our_id = random_identity(); - let neighbor_ids = get_neighbor_ids(); - let mut output = VecDeque::new(); - for i in 0..num_to_generate { - let num_payments_generate: u8 = rand::random(); - let mut payments = Vec::new(); - for _ in 0..num_payments_generate { - let neighbor_idx: u8 = rand::random(); - let amount: u128 = rand::random(); - let to_us: bool = rand::random(); - let (to, from) = if to_us { - (our_id, neighbor_ids[neighbor_idx as usize]) - } else { - (neighbor_ids[neighbor_idx as usize], our_id) - }; - let txid: u128 = rand::random(); - payments.push(FormattedPaymentTx { - to, - from, - amount: amount.into(), - txid: txid.to_string(), - }) - } - output.push_front(PaymentHour { - index: starting_hour - i as u64, - payments, - }); - } - output -} -#[cfg(test)] -// gets a list of pregenerated neighbor id -fn get_neighbor_ids() -> Vec { - let mut id = Vec::new(); - for _ in 0..256 { - id.push(random_identity()); - } - id -} -#[cfg(test)] -/// generates a random identity, never use in production, your money will be stolen -pub fn random_identity() -> Identity { - use clarity::PrivateKey; - - let secret: [u8; 32] = rand::random(); - let mut ip: [u8; 16] = [0; 16]; - ip.copy_from_slice(&secret[0..16]); - - // the starting location of the funds - let eth_key = PrivateKey::from_bytes(secret).unwrap(); - let eth_address = eth_key.to_address(); - - Identity { - mesh_ip: ip.into(), - eth_address, - wg_public_key: secret.into(), - nickname: None, - } -} - -#[cfg(test)] -pub mod tests { - - use super::UsageTracker; - use crate::usage_tracker::{self, generate_dummy_usage_tracker, IOError}; - use flate2::write::ZlibEncoder; - use flate2::Compression; - use settings::client::RitaClientSettings; - use settings::{get_rita_common, set_rita_client, set_rita_common}; - use std::fs::File; - use std::io::Write; - impl UsageTracker { - // previous implementation of save which uses serde_json to serialize - fn save2(&self) -> Result<(), IOError> { - let serialized = serde_json::to_vec(self)?; - let mut file = File::create(settings::get_rita_common().network.usage_tracker_file)?; - let buffer: Vec = Vec::new(); - let mut encoder = ZlibEncoder::new(buffer, Compression::default()); - encoder.write_all(&serialized)?; - let compressed_bytes = encoder.finish()?; - file.write_all(&compressed_bytes) - } - } - - #[test] - fn save_usage_tracker_bincode() { - let rset = RitaClientSettings::new("../settings/test.toml").unwrap(); - set_rita_client(rset); - let mut newrc = get_rita_common(); - newrc.network.usage_tracker_file = "/tmp/usage_tracker.bincode".to_string(); - set_rita_common(newrc); - - let mut dummy_usage_tracker = generate_dummy_usage_tracker(); - let res = dummy_usage_tracker.save(); // saving to bincode with the new method - info!("Saving test data: {:?}", res); - - let res2 = usage_tracker::UsageTracker::load_from_disk(); - info!("Loading test data: {:?}", res2); - - assert_eq!(dummy_usage_tracker, res2); - } - - #[test] - fn convert_legacy_usage_tracker() { - //env_logger::init(); - // make a dummy usage tracker instance - // save it as gzipped json ( pull code from the git history that you deleted and put it in this test) - // makes sure the file exists - // deserialize the file using the upgrade function - // make sure it's equal to the original dummy we 
made - let rset = RitaClientSettings::new("../settings/test.toml").unwrap(); - set_rita_client(rset); - let mut newrc = get_rita_common(); - newrc.network.usage_tracker_file = "/tmp/usage_tracker.json".to_string(); - set_rita_common(newrc); - info!("Generating large usage tracker history"); - let dummy_usage_tracker = generate_dummy_usage_tracker(); - - info!("Saving test data as json"); - dummy_usage_tracker.save2().unwrap(); - - // using load_from_disk() with usage_tracker_file set to a .json writes bincode - // serialized data to a .json extended file, but because load_from_disk() deletes - // the .json file, this test ends with no file left. - info!("Loading test data from json"); - let mut res2 = usage_tracker::UsageTracker::load_from_disk(); - - // setting the usage_tracker_file to .bincode, which is what this upgrade expects - let mut newrc2 = get_rita_common(); - newrc2.network.usage_tracker_file = "/tmp/usage_tracker.bincode".to_string(); - set_rita_common(newrc2); - - // Saving res2 with the new save() and updated usage_tracker_file in order to end with - // a .bincode file from the loaded json data saved to res2. - res2.save().unwrap(); - info!("Saving test data as bincode"); - let res4 = usage_tracker::UsageTracker::load_from_disk(); - info!("Loading test data from bincode"); - - // use == to avoid printing out the compared data - // when it failed, as it is enormous - assert!(dummy_usage_tracker == res2); - assert!(res2 == res4); - } -} +} \ No newline at end of file diff --git a/rita_common/src/usage_tracker/tests.rs b/rita_common/src/usage_tracker/tests.rs new file mode 100644 index 000000000..841153c63 --- /dev/null +++ b/rita_common/src/usage_tracker/tests.rs @@ -0,0 +1,178 @@ +#[cfg(test)] +#[warn(clippy::module_inception)] +pub mod test { + + use crate::usage_tracker::{ + self, get_current_hour, FormattedPaymentTx, IOError, PaymentHour, UsageHour, UsageTracker, + MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE, + }; + use althea_types::Identity; + use flate2::write::ZlibEncoder; + use flate2::Compression; + use settings::client::RitaClientSettings; + use settings::{get_rita_common, set_rita_client, set_rita_common}; + use std::collections::VecDeque; + use std::fs::File; + use std::io::Write; + impl UsageTracker { + // previous implementation of save which uses serde_json to serialize + fn save2(&self) -> Result<(), IOError> { + let serialized = serde_json::to_vec(self)?; + let mut file = File::create(settings::get_rita_common().network.usage_tracker_file)?; + let buffer: Vec = Vec::new(); + let mut encoder = ZlibEncoder::new(buffer, Compression::default()); + encoder.write_all(&serialized)?; + let compressed_bytes = encoder.finish()?; + file.write_all(&compressed_bytes) + } + } + + #[test] + fn save_usage_tracker_bincode() { + let rset = RitaClientSettings::new("../settings/test.toml").unwrap(); + set_rita_client(rset); + let mut newrc = get_rita_common(); + newrc.network.usage_tracker_file = "/tmp/usage_tracker.bincode".to_string(); + set_rita_common(newrc); + + let mut dummy_usage_tracker = generate_dummy_usage_tracker(); + let res = dummy_usage_tracker.save(); // saving to bincode with the new method + info!("Saving test data: {:?}", res); + + let res2 = usage_tracker::UsageTracker::load_from_disk(); + info!("Loading test data: {:?}", res2); + + assert_eq!(dummy_usage_tracker, res2); + } + + #[test] + fn convert_legacy_usage_tracker() { + //env_logger::init(); + // make a dummy usage tracker instance + // save it as gzipped json ( pull code from the git history that you 
deleted and put it in this test) + // makes sure the file exists + // deserialize the file using the upgrade function + // make sure it's equal to the original dummy we made + let rset = RitaClientSettings::new("../settings/test.toml").unwrap(); + set_rita_client(rset); + let mut newrc = get_rita_common(); + newrc.network.usage_tracker_file = "/tmp/usage_tracker.json".to_string(); + set_rita_common(newrc); + info!("Generating large usage tracker history"); + let dummy_usage_tracker = generate_dummy_usage_tracker(); + + info!("Saving test data as json"); + dummy_usage_tracker.save2().unwrap(); + + // using load_from_disk() with usage_tracker_file set to a .json writes bincode + // serialized data to a .json extended file, but because load_from_disk() deletes + // the .json file, this test ends with no file left. + info!("Loading test data from json"); + let mut res2 = usage_tracker::UsageTracker::load_from_disk(); + + // setting the usage_tracker_file to .bincode, which is what this upgrade expects + let mut newrc2 = get_rita_common(); + newrc2.network.usage_tracker_file = "/tmp/usage_tracker.bincode".to_string(); + set_rita_common(newrc2); + + // Saving res2 with the new save() and updated usage_tracker_file in order to end with + // a .bincode file from the loaded json data saved to res2. + res2.save().unwrap(); + info!("Saving test data as bincode"); + let res4 = usage_tracker::UsageTracker::load_from_disk(); + info!("Loading test data from bincode"); + + // use == to avoid printing out the compared data + // when it failed, as it is enormous + assert!(dummy_usage_tracker == res2); + assert!(res2 == res4); + } + // generates a nontrivial usage tracker struct for testing + pub fn generate_dummy_usage_tracker() -> UsageTracker { + let current_hour = get_current_hour().unwrap(); + UsageTracker { + last_save_hour: current_hour, + client_bandwidth: generate_bandwidth(current_hour), + relay_bandwidth: generate_bandwidth(current_hour), + exit_bandwidth: generate_bandwidth(current_hour), + payments: generate_payments(current_hour), + } + } + // generates dummy usage hour data randomly + fn generate_bandwidth(starting_hour: u64) -> VecDeque { + let num_to_generate: u16 = rand::random(); + let mut output = VecDeque::new(); + for i in 0..num_to_generate { + output.push_front(UsageHour { + index: starting_hour - i as u64, + up: rand::random(), + down: rand::random(), + price: rand::random(), + }); + } + output + } + // generates dummy payment data randomly + fn generate_payments(starting_hour: u64) -> VecDeque { + let mut num_to_generate: u8 = rand::random(); + while (num_to_generate as usize) < MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE { + num_to_generate = rand::random(); + } + let our_id = random_identity(); + let neighbor_ids = get_neighbor_ids(); + let mut output = VecDeque::new(); + for i in 0..num_to_generate { + let num_payments_generate: u8 = rand::random(); + let mut payments = Vec::new(); + for _ in 0..num_payments_generate { + let neighbor_idx: u8 = rand::random(); + let amount: u128 = rand::random(); + let to_us: bool = rand::random(); + let (to, from) = if to_us { + (our_id, neighbor_ids[neighbor_idx as usize]) + } else { + (neighbor_ids[neighbor_idx as usize], our_id) + }; + let txid: u128 = rand::random(); + payments.push(FormattedPaymentTx { + to, + from, + amount: amount.into(), + txid: txid.to_string(), + }) + } + output.push_front(PaymentHour { + index: starting_hour - i as u64, + payments, + }); + } + output + } + // gets a list of pregenerated neighbor id + fn 
get_neighbor_ids() -> Vec { + let mut id = Vec::new(); + for _ in 0..256 { + id.push(random_identity()); + } + id + } + /// generates a random identity, never use in production, your money will be stolen + pub fn random_identity() -> Identity { + use clarity::PrivateKey; + + let secret: [u8; 32] = rand::random(); + let mut ip: [u8; 16] = [0; 16]; + ip.copy_from_slice(&secret[0..16]); + + // the starting location of the funds + let eth_key = PrivateKey::from_bytes(secret).unwrap(); + let eth_address = eth_key.to_address(); + + Identity { + mesh_ip: ip.into(), + eth_address, + wg_public_key: secret.into(), + nickname: None, + } + } +} From 2eafe8f378e4c7b776436c872e3fdabe1bf7b62e Mon Sep 17 00:00:00 2001 From: Chiara Seim Date: Fri, 28 Jul 2023 01:55:46 -0700 Subject: [PATCH 4/5] Rustfmt --- rita_client/src/operator_update/mod.rs | 3 +-- rita_common/src/usage_tracker/mod.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/rita_client/src/operator_update/mod.rs b/rita_client/src/operator_update/mod.rs index 595caec2d..7137d2eac 100644 --- a/rita_client/src/operator_update/mod.rs +++ b/rita_client/src/operator_update/mod.rs @@ -1,7 +1,7 @@ //! This module is responsible for checking in with the operator server and getting updated local settings +pub mod tests; pub mod update_loop; pub mod updater; -pub mod tests; extern crate openssh_keys; use crate::dashboard::system_chain::set_system_blockchain; use crate::exit_manager::{get_client_pub_ipv6, get_selected_exit_ip}; @@ -711,4 +711,3 @@ pub fn process_usage_data( relay_bandwidth: new_relay_data, }) } - diff --git a/rita_common/src/usage_tracker/mod.rs b/rita_common/src/usage_tracker/mod.rs index 5353a5458..387742e15 100644 --- a/rita_common/src/usage_tracker/mod.rs +++ b/rita_common/src/usage_tracker/mod.rs @@ -550,4 +550,4 @@ pub fn get_payments_data() -> VecDeque { /// a reboot or restart only, most common form of shutdown is power being pulled pub fn save_usage_on_shutdown() { save_usage_to_disk() -} \ No newline at end of file +} From 5c5e012713d0d637d9c48ce24dfd3e9c651dc8c6 Mon Sep 17 00:00:00 2001 From: Chiara Seim Date: Fri, 28 Jul 2023 16:07:27 -0700 Subject: [PATCH 5/5] Make usage tracker test data generation public --- rita_client/src/operator_update/tests.rs | 39 ++---------------------- rita_common/src/usage_tracker/mod.rs | 2 +- rita_common/src/usage_tracker/tests.rs | 20 +++++++----- 3 files changed, 16 insertions(+), 45 deletions(-) diff --git a/rita_client/src/operator_update/tests.rs b/rita_client/src/operator_update/tests.rs index dbf11ea13..b53723b86 100644 --- a/rita_client/src/operator_update/tests.rs +++ b/rita_client/src/operator_update/tests.rs @@ -1,14 +1,10 @@ #[cfg(test)] mod test { use rand::seq::SliceRandom; - // TODO: Why is this import broken? 
- //use rita_common::usage_tracker::generate_dummy_usage_tracker; use rita_common::usage_tracker::get_current_hour; - use rita_common::usage_tracker::UsageHour as RCUsageHour; - use rita_common::usage_tracker::UsageTracker as RCUsageTracker; + use rita_common::usage_tracker::tests::test::generate_dummy_usage_tracker; use serde_json::json; use serde_json::Value; - use std::collections::VecDeque; use std::fs::File; use std::io::{BufRead, BufReader, Write}; use std::{fs, io::Error, path::Path}; @@ -142,7 +138,7 @@ mod test { #[test] fn test_usage_data_processing() { // this tests the flow used in rita client's operator update loop used to process usage data sent up to ops - let dummy_usage_tracker = generate_dummy_usage_tracker_temp(); + let dummy_usage_tracker = generate_dummy_usage_tracker(); let mut usage_data_client = dummy_usage_tracker.client_bandwidth.clone(); let mut usage_data_relay = dummy_usage_tracker.relay_bandwidth; let mut unshuffled_client = usage_data_client.clone(); @@ -226,35 +222,4 @@ mod test { assert!(res_usage_client.len() <= 730); assert!(res_usage_relay.len() <= 730); } - - // generates a usage tracker struct for testing without payments since these do not get sent up in ops updates. - // using this while I can't get the import working... as a note the original function needs to be updated to push to back - // instead of front, as this generates data in the wrong order - fn generate_dummy_usage_tracker_temp() -> RCUsageTracker { - let current_hour = get_current_hour().unwrap(); - RCUsageTracker { - last_save_hour: current_hour, - client_bandwidth: generate_bandwidth(current_hour), - relay_bandwidth: generate_bandwidth(current_hour), - exit_bandwidth: VecDeque::new(), - payments: VecDeque::new(), - } - } - #[cfg(test)] - // generates dummy usage hour data randomly - fn generate_bandwidth(starting_hour: u64) -> VecDeque { - use rand::{thread_rng, Rng}; - // 8760 is the max number of saved usage entries(1 year) - let num_to_generate: u16 = thread_rng().gen_range(50..8760); - let mut output = VecDeque::new(); - for i in 0..num_to_generate { - output.push_front(RCUsageHour { - index: starting_hour - i as u64, - up: rand::random(), - down: rand::random(), - price: rand::random(), - }); - } - output - } } diff --git a/rita_common/src/usage_tracker/mod.rs b/rita_common/src/usage_tracker/mod.rs index 387742e15..d442c3dc4 100644 --- a/rita_common/src/usage_tracker/mod.rs +++ b/rita_common/src/usage_tracker/mod.rs @@ -35,7 +35,7 @@ use std::usize; pub mod tests; /// one year worth of usage storage -const MAX_USAGE_ENTRIES: usize = 8_760; +pub const MAX_USAGE_ENTRIES: usize = 8_760; /// The number of tx's we store in our history to show /// prices, this data is larger than usage by a large margin /// so we can store less, it's also less predictable for what values diff --git a/rita_common/src/usage_tracker/tests.rs b/rita_common/src/usage_tracker/tests.rs index 841153c63..8b911a18c 100644 --- a/rita_common/src/usage_tracker/tests.rs +++ b/rita_common/src/usage_tracker/tests.rs @@ -1,19 +1,22 @@ -#[cfg(test)] #[warn(clippy::module_inception)] +#[allow(unused)] pub mod test { use crate::usage_tracker::{ - self, get_current_hour, FormattedPaymentTx, IOError, PaymentHour, UsageHour, UsageTracker, - MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE, + get_current_hour, FormattedPaymentTx, IOError, PaymentHour, UsageHour, UsageTracker, + MAX_USAGE_ENTRIES, MINIMUM_NUMBER_OF_TRANSACTIONS_LARGE_STORAGE, }; use althea_types::Identity; use flate2::write::ZlibEncoder; use flate2::Compression; + 
use rand::{thread_rng, Rng}; use settings::client::RitaClientSettings; use settings::{get_rita_common, set_rita_client, set_rita_common}; use std::collections::VecDeque; + use std::convert::TryInto; use std::fs::File; use std::io::Write; + #[cfg(test)] impl UsageTracker { // previous implementation of save which uses serde_json to serialize fn save2(&self) -> Result<(), IOError> { @@ -39,7 +42,7 @@ pub mod test { let res = dummy_usage_tracker.save(); // saving to bincode with the new method info!("Saving test data: {:?}", res); - let res2 = usage_tracker::UsageTracker::load_from_disk(); + let res2 = UsageTracker::load_from_disk(); info!("Loading test data: {:?}", res2); assert_eq!(dummy_usage_tracker, res2); @@ -68,7 +71,7 @@ pub mod test { // serialized data to a .json extended file, but because load_from_disk() deletes // the .json file, this test ends with no file left. info!("Loading test data from json"); - let mut res2 = usage_tracker::UsageTracker::load_from_disk(); + let mut res2 = UsageTracker::load_from_disk(); // setting the usage_tracker_file to .bincode, which is what this upgrade expects let mut newrc2 = get_rita_common(); @@ -79,7 +82,7 @@ pub mod test { // a .bincode file from the loaded json data saved to res2. res2.save().unwrap(); info!("Saving test data as bincode"); - let res4 = usage_tracker::UsageTracker::load_from_disk(); + let res4 = UsageTracker::load_from_disk(); info!("Loading test data from bincode"); // use == to avoid printing out the compared data @@ -100,7 +103,10 @@ pub mod test { } // generates dummy usage hour data randomly fn generate_bandwidth(starting_hour: u64) -> VecDeque { - let num_to_generate: u16 = rand::random(); + let num_to_generate: u16 = thread_rng() + .gen_range(50..MAX_USAGE_ENTRIES) + .try_into() + .unwrap(); let mut output = VecDeque::new(); for i in 0..num_to_generate { output.push_front(UsageHour {