From aa0909fa1ad0ad565de7bcf75c447377c3adb0dd Mon Sep 17 00:00:00 2001 From: onur-ozkan Date: Thu, 28 Mar 2024 16:27:49 +0300 Subject: [PATCH 001/186] WIP: resolve db paths by account keys Signed-off-by: onur-ozkan --- mm2src/coins/lp_coins.rs | 2 + mm2src/mm2_core/src/mm_ctx.rs | 8 ++- mm2src/mm2_main/src/lp_swap.rs | 8 ++- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 29 ++++++--- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 76 ++++++++++++++--------- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 30 ++++++--- 6 files changed, 106 insertions(+), 47 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 5179a5db80..0de2f4afd4 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -2966,6 +2966,8 @@ pub trait MmCoin: /// Loop collecting coin transaction history and saving it to local DB fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; + fn account_key(&self, ctx: &MmArc) -> &str { todo!() } + /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] fn tx_history_path(&self, ctx: &MmArc) -> PathBuf { diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 04de2e4d87..6972e1332e 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -287,7 +287,13 @@ impl MmCtx { /// /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. #[cfg(not(target_arch = "wasm32"))] - pub fn dbdir(&self) -> PathBuf { path_to_dbdir(self.conf["dbdir"].as_str(), self.rmd160()) } + fn dbdir(&self) -> PathBuf { path_to_dbdir(self.conf["dbdir"].as_str(), self.rmd160()) } + + #[cfg(not(target_arch = "wasm32"))] + pub fn db_root(&self) -> PathBuf { + const DEFAULT_ROOT: &str = "DB"; + self.conf["dbdir"].as_str().unwrap_or(DEFAULT_ROOT).into() + } /// MM shared database path. /// Defaults to a relative "DB". 
diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index c4b7a405a0..2d2ce9ad99 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -997,10 +997,14 @@ pub struct TransactionIdentifier { } #[cfg(not(target_arch = "wasm32"))] -pub fn my_swaps_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir().join("SWAPS").join("MY") } +pub fn my_swaps_dir(ctx: &MmArc, account_key: &str) -> PathBuf { + ctx.db_root().join(account_key).join("SWAPS").join("MY") +} #[cfg(not(target_arch = "wasm32"))] -pub fn my_swap_file_path(ctx: &MmArc, uuid: &Uuid) -> PathBuf { my_swaps_dir(ctx).join(format!("{}.json", uuid)) } +pub fn my_swap_file_path(ctx: &MmArc, account_key: &str, uuid: &Uuid) -> PathBuf { + my_swaps_dir(ctx, account_key).join(format!("{}.json", uuid)) +} pub async fn insert_new_swap_to_db( ctx: MmArc, diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index bc2151ef2d..e09f4b3709 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -77,15 +77,26 @@ pub const MAKER_ERROR_EVENTS: [&str; 15] = [ pub const MAKER_PAYMENT_SENT_LOG: &str = "Maker payment sent"; #[cfg(not(target_arch = "wasm32"))] -pub fn stats_maker_swap_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir().join("SWAPS").join("STATS").join("MAKER") } +pub fn stats_maker_swap_dir(ctx: &MmArc, account_key: &str) -> PathBuf { + ctx.db_root() + .join(account_key) + .join("SWAPS") + .join("STATS") + .join("MAKER") +} #[cfg(not(target_arch = "wasm32"))] -pub fn stats_maker_swap_file_path(ctx: &MmArc, uuid: &Uuid) -> PathBuf { - stats_maker_swap_dir(ctx).join(format!("{}.json", uuid)) +pub fn stats_maker_swap_file_path(ctx: &MmArc, account_key: &str, uuid: &Uuid) -> PathBuf { + stats_maker_swap_dir(ctx, account_key).join(format!("{}.json", uuid)) } -async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSavedEvent) -> Result<(), String> { - let swap = match SavedSwap::load_my_swap_from_db(ctx, swap.uuid).await { +async fn save_my_maker_swap_event( + ctx: &MmArc, + account_key: &str, + swap: &MakerSwap, + event: MakerSavedEvent, +) -> Result<(), String> { + let swap = match SavedSwap::load_my_swap_from_db(ctx, account_key, swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Maker(MakerSavedSwap { uuid: swap.uuid, @@ -111,7 +122,7 @@ async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSav maker_swap.fetch_and_set_usd_prices().await; } let new_swap = SavedSwap::Maker(maker_swap); - try_s!(new_swap.save_to_db(ctx).await); + try_s!(new_swap.save_to_db(ctx, account_key).await); Ok(()) } else { ERR!("Expected SavedSwap::Maker, got {:?}", swap) @@ -1290,7 +1301,8 @@ impl MakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let saved = match SavedSwap::load_my_swap_from_db(&ctx, *swap_uuid).await { + let account_key = todo!(); + let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key, *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), Err(e) => return ERR!("{}", e), @@ -2108,7 +2120,8 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { .dispatch_async(ctx.clone(), LpEvents::MakerSwapStatusChanged(event_to_send)) .await; drop(dispatcher); - save_my_maker_swap_event(&ctx, &running_swap, to_save) + let account_key = todo!(); + save_my_maker_swap_event(&ctx, account_key, &running_swap, to_save) .await 
.expect("!save_my_maker_swap_event"); if event.should_ban_taker() { diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index d5333bae17..e489a46235 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -157,28 +157,36 @@ impl SavedSwap { #[async_trait] pub trait SavedSwapIo { - async fn load_my_swap_from_db(ctx: &MmArc, uuid: Uuid) -> SavedSwapResult>; + async fn load_my_swap_from_db(ctx: &MmArc, account_key: &str, uuid: Uuid) -> SavedSwapResult>; - async fn load_all_my_swaps_from_db(ctx: &MmArc) -> SavedSwapResult>; + async fn load_all_my_swaps_from_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] - async fn load_from_maker_stats_db(ctx: &MmArc, uuid: Uuid) -> SavedSwapResult>; + async fn load_from_maker_stats_db( + ctx: &MmArc, + account_key: &str, + uuid: Uuid, + ) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] - async fn load_all_from_maker_stats_db(ctx: &MmArc) -> SavedSwapResult>; + async fn load_all_from_maker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] - async fn load_from_taker_stats_db(ctx: &MmArc, uuid: Uuid) -> SavedSwapResult>; + async fn load_from_taker_stats_db( + ctx: &MmArc, + account_key: &str, + uuid: Uuid, + ) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] - async fn load_all_from_taker_stats_db(ctx: &MmArc) -> SavedSwapResult>; + async fn load_all_from_taker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult>; /// Save the serialized `SavedSwap` to the swaps db. - async fn save_to_db(&self, ctx: &MmArc) -> SavedSwapResult<()>; + async fn save_to_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()>; /// Save the inner maker/taker swap to the corresponding stats db. #[cfg(not(target_arch = "wasm32"))] - async fn save_to_stats_db(&self, ctx: &MmArc) -> SavedSwapResult<()>; + async fn save_to_stats_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()>; } #[cfg(not(target_arch = "wasm32"))] @@ -206,51 +214,63 @@ mod native_impl { #[async_trait] impl SavedSwapIo for SavedSwap { - async fn load_my_swap_from_db(ctx: &MmArc, uuid: Uuid) -> SavedSwapResult> { - let path = my_swap_file_path(ctx, &uuid); + async fn load_my_swap_from_db( + ctx: &MmArc, + account_key: &str, + uuid: Uuid, + ) -> SavedSwapResult> { + let path = my_swap_file_path(ctx, account_key, &uuid); Ok(read_json(&path).await?) } - async fn load_all_my_swaps_from_db(ctx: &MmArc) -> SavedSwapResult> { - let path = my_swaps_dir(ctx); + async fn load_all_my_swaps_from_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult> { + let path = my_swaps_dir(ctx, account_key); Ok(read_dir_json(&path).await?) } - async fn load_from_maker_stats_db(ctx: &MmArc, uuid: Uuid) -> SavedSwapResult> { - let path = stats_maker_swap_file_path(ctx, &uuid); + async fn load_from_maker_stats_db( + ctx: &MmArc, + account_key: &str, + uuid: Uuid, + ) -> SavedSwapResult> { + let path = stats_maker_swap_file_path(ctx, account_key, &uuid); Ok(read_json(&path).await?) } - async fn load_all_from_maker_stats_db(ctx: &MmArc) -> SavedSwapResult> { - let path = stats_maker_swap_dir(ctx); + async fn load_all_from_maker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult> { + let path = stats_maker_swap_dir(ctx, account_key); Ok(read_dir_json(&path).await?) 
        }
 
-        async fn load_from_taker_stats_db(ctx: &MmArc, uuid: Uuid) -> SavedSwapResult<Option<TakerSavedSwap>> {
-            let path = stats_taker_swap_file_path(ctx, &uuid);
+        async fn load_from_taker_stats_db(
+            ctx: &MmArc,
+            account_key: &str,
+            uuid: Uuid,
+        ) -> SavedSwapResult<Option<TakerSavedSwap>> {
+            let path = stats_taker_swap_file_path(ctx, account_key, &uuid);
             Ok(read_json(&path).await?)
         }
 
-        async fn load_all_from_taker_stats_db(ctx: &MmArc) -> SavedSwapResult<Vec<TakerSavedSwap>> {
-            let path = stats_taker_swap_dir(ctx);
+        async fn load_all_from_taker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult<Vec<TakerSavedSwap>> {
+            let path = stats_taker_swap_dir(ctx, account_key);
             Ok(read_dir_json(&path).await?)
         }
 
-        async fn save_to_db(&self, ctx: &MmArc) -> SavedSwapResult<()> {
-            let path = my_swap_file_path(ctx, self.uuid());
+        async fn save_to_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()> {
+            let path = my_swap_file_path(ctx, account_key, self.uuid());
             write_json(self, &path, USE_TMP_FILE).await?;
             Ok(())
         }
 
         /// Save the inner maker/taker swap to the corresponding stats db.
-        async fn save_to_stats_db(&self, ctx: &MmArc) -> SavedSwapResult<()> {
+        async fn save_to_stats_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()> {
             match self {
                 SavedSwap::Maker(maker) => {
-                    let path = stats_maker_swap_file_path(ctx, &maker.uuid);
+                    let path = stats_maker_swap_file_path(ctx, account_key, &maker.uuid);
                     write_json(self, &path, USE_TMP_FILE).await?;
                 },
                 SavedSwap::Taker(taker) => {
-                    let path = stats_taker_swap_file_path(ctx, &taker.uuid);
+                    let path = stats_taker_swap_file_path(ctx, account_key, &taker.uuid);
                     write_json(self, &path, USE_TMP_FILE).await?;
                 },
             }
@@ -374,7 +394,7 @@ mod wasm_impl {
 
     #[async_trait]
     impl SavedSwapIo for SavedSwap {
-        async fn load_my_swap_from_db(ctx: &MmArc, uuid: Uuid) -> SavedSwapResult<Option<SavedSwap>> {
+        async fn load_my_swap_from_db(ctx: &MmArc, account_key: &str, uuid: Uuid) -> SavedSwapResult<Option<SavedSwap>> {
             let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?;
             let db = swaps_ctx.swap_db().await?;
             let transaction = db.transaction().await?;
@@ -388,7 +408,7 @@ mod wasm_impl {
             json::from_value(saved_swap_json).map_to_mm(|e| SavedSwapError::ErrorDeserializing(e.to_string()))
         }
 
-        async fn load_all_my_swaps_from_db(ctx: &MmArc) -> SavedSwapResult<Vec<SavedSwap>> {
+        async fn load_all_my_swaps_from_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult<Vec<SavedSwap>> {
             let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?;
             let db = swaps_ctx.swap_db().await?;
             let transaction = db.transaction().await?;
@@ -403,7 +423,7 @@ mod wasm_impl {
                 .collect()
         }
 
-        async fn save_to_db(&self, ctx: &MmArc) -> SavedSwapResult<()> {
+        async fn save_to_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()> {
             let saved_swap = json::to_value(self).map_to_mm(|e| SavedSwapError::ErrorSerializing(e.to_string()))?;
             let saved_swap_item = SavedSwapTable {
                 uuid: *self.uuid(),
diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs
index e4acf1b968..061298546b 100644
--- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs
+++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs
@@ -98,15 +98,26 @@ pub const WATCHER_MESSAGE_SENT_LOG: &str = "Watcher message sent...";
 pub const MAKER_PAYMENT_SPENT_BY_WATCHER_LOG: &str = "Maker payment is spent by the watcher...";
 
 #[cfg(not(target_arch = "wasm32"))]
-pub fn stats_taker_swap_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir().join("SWAPS").join("STATS").join("TAKER") }
+pub fn stats_taker_swap_dir(ctx: &MmArc, account_key: &str) -> PathBuf {
+    ctx.db_root()
+
.join(account_key) + .join("SWAPS") + .join("STATS") + .join("TAKER") +} #[cfg(not(target_arch = "wasm32"))] -pub fn stats_taker_swap_file_path(ctx: &MmArc, uuid: &Uuid) -> PathBuf { - stats_taker_swap_dir(ctx).join(format!("{}.json", uuid)) +pub fn stats_taker_swap_file_path(ctx: &MmArc, account_key: &str, uuid: &Uuid) -> PathBuf { + stats_taker_swap_dir(ctx, account_key).join(format!("{}.json", uuid)) } -async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSavedEvent) -> Result<(), String> { - let swap = match SavedSwap::load_my_swap_from_db(ctx, swap.uuid).await { +async fn save_my_taker_swap_event( + ctx: &MmArc, + account_key: &str, + swap: &TakerSwap, + event: TakerSavedEvent, +) -> Result<(), String> { + let swap = match SavedSwap::load_my_swap_from_db(ctx, account_key, swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Taker(TakerSavedSwap { uuid: swap.uuid, @@ -142,7 +153,7 @@ async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSav taker_swap.fetch_and_set_usd_prices().await; } let new_swap = SavedSwap::Taker(taker_swap); - try_s!(new_swap.save_to_db(ctx).await); + try_s!(new_swap.save_to_db(ctx, account_key).await); Ok(()) } else { ERR!("Expected SavedSwap::Taker, got {:?}", swap) @@ -459,7 +470,8 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { event: event.clone(), }; - save_my_taker_swap_event(&ctx, &running_swap, to_save) + let account_key = todo!(); + save_my_taker_swap_event(&ctx, account_key, &running_swap, to_save) .await .expect("!save_my_taker_swap_event"); if event.should_ban_maker() { @@ -1949,7 +1961,9 @@ impl TakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let saved = match SavedSwap::load_my_swap_from_db(&ctx, *swap_uuid).await { + let account_key = todo!(); + + let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key, *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), Err(e) => return ERR!("{}", e), From f0516008106c966b5a882365d240f4a83385a775 Mon Sep 17 00:00:00 2001 From: onur-ozkan Date: Fri, 29 Mar 2024 13:43:13 +0300 Subject: [PATCH 002/186] WIP: handle db ids optionally Signed-off-by: onur-ozkan --- mm2src/coins/eth.rs | 4 +-- mm2src/coins/lightning/ln_utils.rs | 2 +- mm2src/coins/lp_coins.rs | 6 ++--- .../utxo/utxo_builder/utxo_coin_builder.rs | 2 +- mm2src/coins/z_coin.rs | 2 +- mm2src/mm2_core/src/mm_ctx.rs | 27 ++++++++++--------- mm2src/mm2_main/src/lp_native_dex.rs | 4 +-- mm2src/mm2_main/src/lp_swap.rs | 3 ++- 8 files changed, 27 insertions(+), 23 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index 83a7f9d77f..fcb0945341 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -514,7 +514,7 @@ async fn make_gas_station_request(url: &str) -> GasStationResult { impl EthCoinImpl { #[cfg(not(target_arch = "wasm32"))] fn eth_traces_path(&self, ctx: &MmArc) -> PathBuf { - ctx.dbdir() + ctx.dbdir(None) .join("TRANSACTIONS") .join(format!("{}_{:#02x}_trace.json", self.ticker, self.my_address)) } @@ -558,7 +558,7 @@ impl EthCoinImpl { #[cfg(not(target_arch = "wasm32"))] fn erc20_events_path(&self, ctx: &MmArc) -> PathBuf { - ctx.dbdir() + ctx.dbdir(None) .join("TRANSACTIONS") .join(format!("{}_{:#02x}_events.json", self.ticker, self.my_address)) } diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 88af1d68cc..a66af3fda4 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ 
b/mm2src/coins/lightning/ln_utils.rs @@ -39,7 +39,7 @@ pub type ChannelManager = SimpleArcChannelManager, Arc, Arc>; #[inline] -fn ln_data_dir(ctx: &MmArc, ticker: &str) -> PathBuf { ctx.dbdir().join("LIGHTNING").join(ticker) } +fn ln_data_dir(ctx: &MmArc, ticker: &str) -> PathBuf { ctx.dbdir(None).join("LIGHTNING").join(ticker) } #[inline] fn ln_data_backup_dir(ctx: &MmArc, path: Option, ticker: &str) -> Option { diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 0de2f4afd4..54833057d6 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -2966,7 +2966,7 @@ pub trait MmCoin: /// Loop collecting coin transaction history and saving it to local DB fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; - fn account_key(&self, ctx: &MmArc) -> &str { todo!() } + fn db_id(&self) -> Option<&str> { None } /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] @@ -2975,7 +2975,7 @@ pub trait MmCoin: // BCH cash address format has colon after prefix, e.g. bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - ctx.dbdir() + ctx.dbdir(None) .join("TRANSACTIONS") .join(format!("{}_{}.json", self.ticker(), my_address)) } @@ -2987,7 +2987,7 @@ pub trait MmCoin: // BCH cash address format has colon after prefix, e.g. bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - ctx.dbdir() + ctx.dbdir(None) .join("TRANSACTIONS") .join(format!("{}_{}_migration", self.ticker(), my_address)) } diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 188a644ba6..c93ec46ea3 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -734,7 +734,7 @@ pub trait UtxoCoinBuilderCommonOps { } #[cfg(not(target_arch = "wasm32"))] - fn tx_cache_path(&self) -> PathBuf { self.ctx().dbdir().join("TX_CACHE") } + fn tx_cache_path(&self) -> PathBuf { self.ctx().dbdir(None).join("TX_CACHE") } fn block_header_status_channel( &self, diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index 3ec97cd558..f0755296ae 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -876,7 +876,7 @@ pub async fn z_coin_from_conf_and_params( #[cfg(target_arch = "wasm32")] let db_dir_path = PathBuf::new(); #[cfg(not(target_arch = "wasm32"))] - let db_dir_path = ctx.dbdir(); + let db_dir_path = ctx.dbdir(None); let z_spending_key = None; let builder = ZCoinBuilder::new( ctx, diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 6972e1332e..baead46d1c 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -287,12 +287,12 @@ impl MmCtx { /// /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. #[cfg(not(target_arch = "wasm32"))] - fn dbdir(&self) -> PathBuf { path_to_dbdir(self.conf["dbdir"].as_str(), self.rmd160()) } + pub fn dbdir(&self, db_id: Option<&str>) -> PathBuf { + let db_id = db_id.map(|t| t.to_owned()).unwrap_or_else(|| { + hex::encode(self.rmd160().as_slice()) + }); - #[cfg(not(target_arch = "wasm32"))] - pub fn db_root(&self) -> PathBuf { - const DEFAULT_ROOT: &str = "DB"; - self.conf["dbdir"].as_str().unwrap_or(DEFAULT_ROOT).into() + path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) } /// MM shared database path. 
@@ -304,7 +304,10 @@ impl MmCtx { /// /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. #[cfg(not(target_arch = "wasm32"))] - pub fn shared_dbdir(&self) -> PathBuf { path_to_dbdir(self.conf["dbdir"].as_str(), self.shared_db_id()) } + pub fn shared_dbdir(&self) -> PathBuf { + let db_id = hex::encode(self.shared_db_id().as_slice()); + path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) + } pub fn is_watcher(&self) -> bool { self.conf["is_watcher"].as_bool().unwrap_or_default() } @@ -336,8 +339,8 @@ impl MmCtx { pub fn mm_version(&self) -> &str { &self.mm_version } #[cfg(not(target_arch = "wasm32"))] - pub fn init_sqlite_connection(&self) -> Result<(), String> { - let sqlite_file_path = self.dbdir().join("MM2.db"); + pub fn init_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { + let sqlite_file_path = self.dbdir(db_id).join("MM2.db"); log_sqlite_file_open_attempt(&sqlite_file_path); let connection = try_s!(Connection::open(sqlite_file_path)); try_s!(self.sqlite_connection.pin(Arc::new(Mutex::new(connection)))); @@ -354,8 +357,8 @@ impl MmCtx { } #[cfg(not(target_arch = "wasm32"))] - pub async fn init_async_sqlite_connection(&self) -> Result<(), String> { - let sqlite_file_path = self.dbdir().join("KOMODEFI.db"); + pub async fn init_async_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { + let sqlite_file_path = self.dbdir(db_id).join("KOMODEFI.db"); log_sqlite_file_open_attempt(&sqlite_file_path); let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); try_s!(self.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(async_conn)))); @@ -401,7 +404,7 @@ impl Drop for MmCtx { /// This function can be used later by an FFI function to open a GUI storage. #[cfg(not(target_arch = "wasm32"))] -pub fn path_to_dbdir(db_root: Option<&str>, db_id: &H160) -> PathBuf { +pub fn path_to_dbdir(db_root: Option<&str>, db_id: &str) -> PathBuf { const DEFAULT_ROOT: &str = "DB"; let path = match db_root { @@ -409,7 +412,7 @@ pub fn path_to_dbdir(db_root: Option<&str>, db_id: &H160) -> PathBuf { _ => Path::new(DEFAULT_ROOT), }; - path.join(hex::encode(db_id.as_slice())) + path.join(db_id) } // We don't want to send `MmCtx` across threads, it will only obstruct the normal use case diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 50c9f13c83..c8fc8aa916 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -466,11 +466,11 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { fix_directories(&ctx)?; - ctx.init_sqlite_connection() + ctx.init_sqlite_connection(None) .map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.init_shared_sqlite_conn() .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_async_sqlite_connection() + ctx.init_async_sqlite_connection(None) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; init_and_migrate_sql_db(&ctx).await?; diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 2d2ce9ad99..76941f919e 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1364,7 +1364,8 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { let mut coins = HashSet::new(); let legacy_unfinished_uuids = try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE).await); for uuid in legacy_unfinished_uuids { - let swap = match SavedSwap::load_my_swap_from_db(&ctx, uuid).await { + let db_id = 
todo!(); + let swap = match SavedSwap::load_my_swap_from_db(&ctx, db_id, uuid).await { Ok(Some(s)) => s, Ok(None) => { warn!("Swap {} is indexed, but doesn't exist in DB", uuid); From c8a434e371237781f2647e652db3728485fed53f Mon Sep 17 00:00:00 2001 From: onur-ozkan Date: Mon, 1 Apr 2024 12:28:41 +0300 Subject: [PATCH 003/186] use dummy values to fix build errors for development Signed-off-by: onur-ozkan --- mm2src/coins/lp_coins.rs | 2 +- mm2src/mm2_main/src/database.rs | 6 +-- mm2src/mm2_main/src/database/my_swaps.rs | 8 ++-- mm2src/mm2_main/src/database/stats_swaps.rs | 6 +-- mm2src/mm2_main/src/lp_native_dex.rs | 4 +- mm2src/mm2_main/src/lp_ordermatch.rs | 6 +-- mm2src/mm2_main/src/lp_swap.rs | 36 +++++++------- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 15 +++--- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 50 ++++++++++---------- mm2src/mm2_main/src/lp_swap/swap_lock.rs | 2 +- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 9 ++-- mm2src/mm2_main/src/lp_swap/taker_restart.rs | 4 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 18 +++---- 13 files changed, 81 insertions(+), 85 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 54833057d6..1974166515 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -2966,7 +2966,7 @@ pub trait MmCoin: /// Loop collecting coin transaction history and saving it to local DB fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; - fn db_id(&self) -> Option<&str> { None } + fn account_db_id(&self) -> Option<&str> { None } /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 1017f1fd6b..cdaa5f94cc 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -72,10 +72,10 @@ fn clean_db(ctx: &MmArc) { } } -async fn migration_1(ctx: &MmArc) -> Vec<(&'static str, Vec)> { fill_my_swaps_from_json_statements(ctx).await } +async fn migration_1(ctx: &MmArc) -> Vec<(&'static str, Vec)> { fill_my_swaps_from_json_statements(ctx, None).await } async fn migration_2(ctx: &MmArc) -> Vec<(&'static str, Vec)> { - create_and_fill_stats_swaps_from_json_statements(ctx).await + create_and_fill_stats_swaps_from_json_statements(ctx, None).await } fn migration_3() -> Vec<(&'static str, Vec)> { vec![(stats_swaps::ADD_STARTED_AT_INDEX, vec![])] } @@ -106,7 +106,7 @@ fn migration_9() -> Vec<(&'static str, Vec)> { } async fn migration_10(ctx: &MmArc) -> Vec<(&'static str, Vec)> { - set_is_finished_for_legacy_swaps_statements(ctx).await + set_is_finished_for_legacy_swaps_statements(ctx, None).await } fn migration_11() -> Vec<(&'static str, Vec)> { diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 55b08f3957..612f766f75 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -128,8 +128,8 @@ pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)]) -> SqlResu /// Returns SQL statements to initially fill my_swaps table using existing DB with JSON files /// Use this only in migration code! 
-pub async fn fill_my_swaps_from_json_statements(ctx: &MmArc) -> Vec<(&'static str, Vec)> { - let swaps = SavedSwap::load_all_my_swaps_from_db(ctx).await.unwrap_or_default(); +pub async fn fill_my_swaps_from_json_statements(ctx: &MmArc, db_id: Option<&str>) -> Vec<(&'static str, Vec)> { + let swaps = SavedSwap::load_all_my_swaps_from_db(ctx, db_id).await.unwrap_or_default(); swaps .into_iter() .filter_map(insert_saved_swap_sql_migration_1) @@ -343,8 +343,8 @@ WHERE uuid = :uuid; "#; /// Returns SQL statements to set is_finished to 1 for completed legacy swaps -pub async fn set_is_finished_for_legacy_swaps_statements(ctx: &MmArc) -> Vec<(&'static str, Vec)> { - let swaps = SavedSwap::load_all_my_swaps_from_db(ctx).await.unwrap_or_default(); +pub async fn set_is_finished_for_legacy_swaps_statements(ctx: &MmArc, db_id: Option<&str>) -> Vec<(&'static str, Vec)> { + let swaps = SavedSwap::load_all_my_swaps_from_db(ctx, db_id).await.unwrap_or_default(); swaps .into_iter() .filter_map(|swap| { diff --git a/mm2src/mm2_main/src/database/stats_swaps.rs b/mm2src/mm2_main/src/database/stats_swaps.rs index b1127b2d36..cca86463a3 100644 --- a/mm2src/mm2_main/src/database/stats_swaps.rs +++ b/mm2src/mm2_main/src/database/stats_swaps.rs @@ -97,9 +97,9 @@ pub const ADD_MAKER_TAKER_GUI_AND_VERSION: &[&str] = &[ pub const SELECT_ID_BY_UUID: &str = "SELECT id FROM stats_swaps WHERE uuid = ?1"; /// Returns SQL statements to initially fill stats_swaps table using existing DB with JSON files -pub async fn create_and_fill_stats_swaps_from_json_statements(ctx: &MmArc) -> Vec<(&'static str, Vec)> { - let maker_swaps = SavedSwap::load_all_from_maker_stats_db(ctx).await.unwrap_or_default(); - let taker_swaps = SavedSwap::load_all_from_taker_stats_db(ctx).await.unwrap_or_default(); +pub async fn create_and_fill_stats_swaps_from_json_statements(ctx: &MmArc, db_id: Option<&str>) -> Vec<(&'static str, Vec)> { + let maker_swaps = SavedSwap::load_all_from_maker_stats_db(ctx, db_id).await.unwrap_or_default(); + let taker_swaps = SavedSwap::load_all_from_taker_stats_db(ctx, db_id).await.unwrap_or_default(); let mut result = vec![(CREATE_STATS_SWAPS_TABLE, vec![])]; let mut inserted_maker_uuids = HashSet::with_capacity(maker_swaps.len()); diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index c8fc8aa916..671e425ade 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -337,7 +337,7 @@ fn default_seednodes(netid: u16) -> Vec { pub fn fix_directories(ctx: &MmCtx) -> MmInitResult<()> { fix_shared_dbdir(ctx)?; - let dbdir = ctx.dbdir(); + let dbdir = ctx.dbdir(None); fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { path: dbdir.clone(), error: e.to_string(), @@ -407,7 +407,7 @@ fn fix_shared_dbdir(ctx: &MmCtx) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] fn migrate_db(ctx: &MmArc) -> MmInitResult<()> { - let migration_num_path = ctx.dbdir().join(".migration"); + let migration_num_path = ctx.dbdir(None).join(".migration"); let mut current_migration = match std::fs::read(&migration_num_path) { Ok(bytes) => { let mut num_bytes = [0; 8]; diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index a1393bf71e..ba57cfdf69 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -5345,13 +5345,13 @@ pub async fn my_orders(ctx: MmArc) -> Result>, String> { } #[cfg(not(target_arch = "wasm32"))] -pub fn my_maker_orders_dir(ctx: &MmArc) -> PathBuf { 
ctx.dbdir().join("ORDERS").join("MY").join("MAKER") } +pub fn my_maker_orders_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir(None).join("ORDERS").join("MY").join("MAKER") } #[cfg(not(target_arch = "wasm32"))] -fn my_taker_orders_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir().join("ORDERS").join("MY").join("TAKER") } +fn my_taker_orders_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir(None).join("ORDERS").join("MY").join("TAKER") } #[cfg(not(target_arch = "wasm32"))] -fn my_orders_history_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir().join("ORDERS").join("MY").join("HISTORY") } +fn my_orders_history_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir(None).join("ORDERS").join("MY").join("HISTORY") } #[cfg(not(target_arch = "wasm32"))] pub fn my_maker_order_file_path(ctx: &MmArc, uuid: &Uuid) -> PathBuf { diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 76941f919e..1914b27fd2 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -997,13 +997,13 @@ pub struct TransactionIdentifier { } #[cfg(not(target_arch = "wasm32"))] -pub fn my_swaps_dir(ctx: &MmArc, account_key: &str) -> PathBuf { - ctx.db_root().join(account_key).join("SWAPS").join("MY") +pub fn my_swaps_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id).join("SWAPS").join("MY") } #[cfg(not(target_arch = "wasm32"))] -pub fn my_swap_file_path(ctx: &MmArc, account_key: &str, uuid: &Uuid) -> PathBuf { - my_swaps_dir(ctx, account_key).join(format!("{}.json", uuid)) +pub fn my_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) -> PathBuf { + my_swaps_dir(ctx, db_id).join(format!("{}.json", uuid)) } pub async fn insert_new_swap_to_db( @@ -1029,7 +1029,7 @@ fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap) { #[cfg(not(target_arch = "wasm32"))] async fn save_stats_swap(ctx: &MmArc, swap: &SavedSwap) -> Result<(), String> { - try_s!(swap.save_to_stats_db(ctx).await); + try_s!(swap.save_to_stats_db(ctx, None).await); add_swap_to_db_index(ctx, swap); Ok(()) } @@ -1115,7 +1115,7 @@ pub async fn my_swap_status(ctx: MmArc, req: Json) -> Result>, match swap_type { Some(LEGACY_SWAP_TYPE) => { - let status = match SavedSwap::load_my_swap_from_db(&ctx, uuid).await { + let status = match SavedSwap::load_my_swap_from_db(&ctx, None, uuid).await { Ok(Some(status)) => status, Ok(None) => return Err("swap data is not found".to_owned()), Err(e) => return ERR!("{}", e), @@ -1152,8 +1152,8 @@ pub async fn stats_swap_status(_ctx: MmArc, _req: Json) -> Result Result>, String> { let uuid: Uuid = try_s!(json::from_value(req["params"]["uuid"].clone())); - let maker_status = try_s!(SavedSwap::load_from_maker_stats_db(&ctx, uuid).await); - let taker_status = try_s!(SavedSwap::load_from_taker_stats_db(&ctx, uuid).await); + let maker_status = try_s!(SavedSwap::load_from_maker_stats_db(&ctx, None, uuid).await); + let taker_status = try_s!(SavedSwap::load_from_taker_stats_db(&ctx, None, uuid).await); if maker_status.is_none() && taker_status.is_none() { return ERR!("swap data is not found"); @@ -1177,7 +1177,7 @@ struct SwapStatus { /// Broadcasts `my` swap status to P2P network async fn broadcast_my_swap_status(ctx: &MmArc, uuid: Uuid) -> Result<(), String> { - let mut status = match try_s!(SavedSwap::load_my_swap_from_db(ctx, uuid).await) { + let mut status = match try_s!(SavedSwap::load_my_swap_from_db(ctx, None, uuid).await) { Some(status) => status, None => return ERR!("swap data is not found"), }; @@ -1286,7 +1286,7 @@ pub async fn latest_swaps_for_pair( let mut swaps = 
Vec::with_capacity(db_result.uuids_and_types.len()); // TODO this is needed for trading bot, which seems not used as of now. Remove the code? for (uuid, _) in db_result.uuids_and_types.iter() { - let swap = match SavedSwap::load_my_swap_from_db(&ctx, *uuid).await { + let swap = match SavedSwap::load_my_swap_from_db(&ctx, None, *uuid).await { Ok(Some(swap)) => swap, Ok(None) => { error!("No such swap with the uuid '{}'", uuid); @@ -1313,7 +1313,7 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result match SavedSwap::load_my_swap_from_db(&ctx, *uuid).await { + LEGACY_SWAP_TYPE => match SavedSwap::load_my_swap_from_db(&ctx, None, *uuid).await { Ok(Some(swap)) => { let swap_json = try_s!(json::to_value(MySwapStatusResponse::from(swap))); swaps.push(swap_json) @@ -1521,7 +1521,7 @@ pub async fn coins_needed_for_kick_start(ctx: MmArc) -> Result> pub async fn recover_funds_of_swap(ctx: MmArc, req: Json) -> Result>, String> { let uuid: Uuid = try_s!(json::from_value(req["params"]["uuid"].clone())); - let swap = match SavedSwap::load_my_swap_from_db(&ctx, uuid).await { + let swap = match SavedSwap::load_my_swap_from_db(&ctx, None, uuid).await { Ok(Some(swap)) => swap, Ok(None) => return ERR!("swap data is not found"), Err(e) => return ERR!("{}", e), @@ -1544,7 +1544,7 @@ pub async fn import_swaps(ctx: MmArc, req: Json) -> Result>, St let mut imported = vec![]; let mut skipped = HashMap::new(); for swap in swaps { - match swap.save_to_db(&ctx).await { + match swap.save_to_db(&ctx, None).await { Ok(_) => { if let Some(info) = swap.get_my_info() { if let Err(e) = insert_new_swap_to_db( @@ -1598,7 +1598,7 @@ pub async fn active_swaps_rpc(ctx: MmArc, req: Json) -> Result> for (uuid, swap_type) in uuids_with_types.iter() { match *swap_type { LEGACY_SWAP_TYPE => { - let status = match SavedSwap::load_my_swap_from_db(&ctx, *uuid).await { + let status = match SavedSwap::load_my_swap_from_db(&ctx, None, *uuid).await { Ok(Some(status)) => status, Ok(None) => continue, Err(e) => { @@ -2273,7 +2273,7 @@ mod lp_swap_tests { fix_directories(&maker_ctx).unwrap(); block_on(init_p2p(maker_ctx.clone())).unwrap(); - maker_ctx.init_sqlite_connection().unwrap(); + maker_ctx.init_sqlite_connection(None).unwrap(); let rick_activation_params = utxo_activation_params(RICK_ELECTRUM_ADDRS); let morty_activation_params = utxo_activation_params(MORTY_ELECTRUM_ADDRS); @@ -2311,7 +2311,7 @@ mod lp_swap_tests { fix_directories(&taker_ctx).unwrap(); block_on(init_p2p(taker_ctx.clone())).unwrap(); - taker_ctx.init_sqlite_connection().unwrap(); + taker_ctx.init_sqlite_connection(None).unwrap(); let rick_taker = block_on(utxo_standard_coin_with_priv_key( &taker_ctx, @@ -2388,13 +2388,13 @@ mod lp_swap_tests { println!( "Maker swap path {}", - std::fs::canonicalize(my_swap_file_path(&maker_ctx, &uuid)) + std::fs::canonicalize(my_swap_file_path(&maker_ctx, None, &uuid)) .unwrap() .display() ); println!( "Taker swap path {}", - std::fs::canonicalize(my_swap_file_path(&taker_ctx, &uuid)) + std::fs::canonicalize(my_swap_file_path(&taker_ctx, None, &uuid)) .unwrap() .display() ); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index e09f4b3709..4f42476e2e 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -77,26 +77,25 @@ pub const MAKER_ERROR_EVENTS: [&str; 15] = [ pub const MAKER_PAYMENT_SENT_LOG: &str = "Maker payment sent"; #[cfg(not(target_arch = "wasm32"))] -pub fn stats_maker_swap_dir(ctx: &MmArc, account_key: 
&str) -> PathBuf { - ctx.db_root() - .join(account_key) +pub fn stats_maker_swap_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id) .join("SWAPS") .join("STATS") .join("MAKER") } #[cfg(not(target_arch = "wasm32"))] -pub fn stats_maker_swap_file_path(ctx: &MmArc, account_key: &str, uuid: &Uuid) -> PathBuf { - stats_maker_swap_dir(ctx, account_key).join(format!("{}.json", uuid)) +pub fn stats_maker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) -> PathBuf { + stats_maker_swap_dir(ctx, db_id).join(format!("{}.json", uuid)) } async fn save_my_maker_swap_event( ctx: &MmArc, - account_key: &str, + db_id: Option<&str>, swap: &MakerSwap, event: MakerSavedEvent, ) -> Result<(), String> { - let swap = match SavedSwap::load_my_swap_from_db(ctx, account_key, swap.uuid).await { + let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id, swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Maker(MakerSavedSwap { uuid: swap.uuid, @@ -122,7 +121,7 @@ async fn save_my_maker_swap_event( maker_swap.fetch_and_set_usd_prices().await; } let new_swap = SavedSwap::Maker(maker_swap); - try_s!(new_swap.save_to_db(ctx, account_key).await); + try_s!(new_swap.save_to_db(ctx, db_id).await); Ok(()) } else { ERR!("Expected SavedSwap::Maker, got {:?}", swap) diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index e489a46235..1770eaf1ee 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -157,36 +157,36 @@ impl SavedSwap { #[async_trait] pub trait SavedSwapIo { - async fn load_my_swap_from_db(ctx: &MmArc, account_key: &str, uuid: Uuid) -> SavedSwapResult>; + async fn load_my_swap_from_db(ctx: &MmArc, db_id: Option<&str>, uuid: Uuid) -> SavedSwapResult>; - async fn load_all_my_swaps_from_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult>; + async fn load_all_my_swaps_from_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] async fn load_from_maker_stats_db( ctx: &MmArc, - account_key: &str, + db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] - async fn load_all_from_maker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult>; + async fn load_all_from_maker_stats_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] async fn load_from_taker_stats_db( ctx: &MmArc, - account_key: &str, + db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult>; #[cfg(not(target_arch = "wasm32"))] - async fn load_all_from_taker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult>; + async fn load_all_from_taker_stats_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult>; /// Save the serialized `SavedSwap` to the swaps db. - async fn save_to_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()>; + async fn save_to_db(&self, ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult<()>; /// Save the inner maker/taker swap to the corresponding stats db. 
#[cfg(not(target_arch = "wasm32"))] - async fn save_to_stats_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()>; + async fn save_to_stats_db(&self, ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult<()>; } #[cfg(not(target_arch = "wasm32"))] @@ -216,61 +216,61 @@ mod native_impl { impl SavedSwapIo for SavedSwap { async fn load_my_swap_from_db( ctx: &MmArc, - account_key: &str, + db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult> { - let path = my_swap_file_path(ctx, account_key, &uuid); + let path = my_swap_file_path(ctx, db_id, &uuid); Ok(read_json(&path).await?) } - async fn load_all_my_swaps_from_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult> { - let path = my_swaps_dir(ctx, account_key); + async fn load_all_my_swaps_from_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { + let path = my_swaps_dir(ctx, db_id); Ok(read_dir_json(&path).await?) } async fn load_from_maker_stats_db( ctx: &MmArc, - account_key: &str, + db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult> { - let path = stats_maker_swap_file_path(ctx, account_key, &uuid); + let path = stats_maker_swap_file_path(ctx, db_id, &uuid); Ok(read_json(&path).await?) } - async fn load_all_from_maker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult> { - let path = stats_maker_swap_dir(ctx, account_key); + async fn load_all_from_maker_stats_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { + let path = stats_maker_swap_dir(ctx, db_id); Ok(read_dir_json(&path).await?) } async fn load_from_taker_stats_db( ctx: &MmArc, - account_key: &str, + db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult> { - let path = stats_taker_swap_file_path(ctx, account_key, &uuid); + let path = stats_taker_swap_file_path(ctx, db_id, &uuid); Ok(read_json(&path).await?) } - async fn load_all_from_taker_stats_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult> { - let path = stats_taker_swap_dir(ctx, account_key); + async fn load_all_from_taker_stats_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { + let path = stats_taker_swap_dir(ctx, db_id); Ok(read_dir_json(&path).await?) } - async fn save_to_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()> { - let path = my_swap_file_path(ctx, account_key, self.uuid()); + async fn save_to_db(&self, ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult<()> { + let path = my_swap_file_path(ctx, db_id, self.uuid()); write_json(self, &path, USE_TMP_FILE).await?; Ok(()) } /// Save the inner maker/taker swap to the corresponding stats db. 
-        async fn save_to_stats_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()> {
+        async fn save_to_stats_db(&self, ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult<()> {
             match self {
                 SavedSwap::Maker(maker) => {
-                    let path = stats_maker_swap_file_path(ctx, account_key, &maker.uuid);
+                    let path = stats_maker_swap_file_path(ctx, db_id, &maker.uuid);
                     write_json(self, &path, USE_TMP_FILE).await?;
                 },
                 SavedSwap::Taker(taker) => {
-                    let path = stats_taker_swap_file_path(ctx, account_key, &taker.uuid);
+                    let path = stats_taker_swap_file_path(ctx, db_id, &taker.uuid);
                     write_json(self, &path, USE_TMP_FILE).await?;
                 },
             }
diff --git a/mm2src/mm2_main/src/lp_swap/swap_lock.rs b/mm2src/mm2_main/src/lp_swap/swap_lock.rs
index 25e752d14b..29b053db51 100644
--- a/mm2src/mm2_main/src/lp_swap/swap_lock.rs
+++ b/mm2src/mm2_main/src/lp_swap/swap_lock.rs
@@ -57,7 +57,7 @@ mod native_lock {
 
     #[async_trait]
     impl SwapLockOps for SwapLock {
         async fn lock(ctx: &MmArc, swap_uuid: Uuid, ttl_sec: f64) -> SwapLockResult<Option<SwapLock>> {
-            let lock_path = my_swaps_dir(ctx).join(format!("{}.lock", swap_uuid));
+            let lock_path = my_swaps_dir(ctx, None).join(format!("{}.lock", swap_uuid));
 
             let file_lock = some_or_return_ok_none!(FileLock::lock(lock_path, ttl_sec)?);
             Ok(Some(SwapLock { file_lock }))
diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs
index d5c50c6534..04bb0df564 100644
--- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs
+++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs
@@ -292,12 +292,13 @@ impl From for GetSwapDataErr {
 
 async fn get_swap_data_by_uuid_and_type(
     ctx: &MmArc,
+    db_id: Option<&str>,
     uuid: Uuid,
     swap_type: u8,
 ) -> MmResult<Option<SwapRpcData>, GetSwapDataErr> {
     match swap_type {
         LEGACY_SWAP_TYPE => {
-            let saved_swap = SavedSwap::load_my_swap_from_db(ctx, uuid).await?;
+            let saved_swap = SavedSwap::load_my_swap_from_db(ctx, db_id, uuid).await?;
             Ok(saved_swap.map(|swap| match swap {
                 SavedSwap::Maker(m) => SwapRpcData::MakerV1(m),
                 SavedSwap::Taker(t) => SwapRpcData::TakerV1(t),
@@ -365,7 +366,7 @@ pub(crate) async fn my_swap_status_rpc(
     let swap_type = get_swap_type(&ctx, &req.uuid)
         .await?
         .or_mm_err(|| MySwapStatusError::NoSwapWithUuid(req.uuid))?;
-    get_swap_data_by_uuid_and_type(&ctx, req.uuid, swap_type)
+    get_swap_data_by_uuid_and_type(&ctx, None, req.uuid, swap_type)
         .await?
.or_mm_err(|| MySwapStatusError::NoSwapWithUuid(req.uuid)) } @@ -428,7 +429,7 @@ pub(crate) async fn my_recent_swaps_rpc( .await?; let mut swaps = Vec::with_capacity(db_result.uuids_and_types.len()); for (uuid, swap_type) in db_result.uuids_and_types.iter() { - match get_swap_data_by_uuid_and_type(&ctx, *uuid, *swap_type).await { + match get_swap_data_by_uuid_and_type(&ctx, None, *uuid, *swap_type).await { Ok(Some(data)) => swaps.push(data), Ok(None) => warn!("Swap {} data doesn't exist in DB", uuid), Err(e) => error!("Error {} while trying to get swap {} data", e, uuid), @@ -481,7 +482,7 @@ pub(crate) async fn active_swaps_rpc( let statuses = if req.include_status { let mut statuses = HashMap::with_capacity(uuids_with_types.len()); for (uuid, swap_type) in uuids_with_types.iter() { - match get_swap_data_by_uuid_and_type(&ctx, *uuid, *swap_type).await { + match get_swap_data_by_uuid_and_type(&ctx, None, *uuid, *swap_type).await { Ok(Some(data)) => { statuses.insert(*uuid, data); }, diff --git a/mm2src/mm2_main/src/lp_swap/taker_restart.rs b/mm2src/mm2_main/src/lp_swap/taker_restart.rs index 431d1a7c32..83eaa0740a 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_restart.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_restart.rs @@ -138,7 +138,7 @@ pub async fn check_maker_payment_spend_and_add_event( }; saved.events.push(to_save); let new_swap = SavedSwap::Taker(saved); - try_s!(new_swap.save_to_db(ctx).await); + try_s!(new_swap.save_to_db(ctx, None).await); info!("{}", MAKER_PAYMENT_SPENT_BY_WATCHER_LOG); Ok(TakerSwapCommand::Finish) } @@ -258,7 +258,7 @@ pub async fn add_taker_payment_refunded_by_watcher_event( saved.events.push(to_save); let new_swap = SavedSwap::Taker(saved); - try_s!(new_swap.save_to_db(ctx).await); + try_s!(new_swap.save_to_db(ctx, None).await); info!("Taker payment is refunded by the watcher"); Ok(TakerSwapCommand::Finish) } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 061298546b..645b874aa7 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -98,26 +98,22 @@ pub const WATCHER_MESSAGE_SENT_LOG: &str = "Watcher message sent..."; pub const MAKER_PAYMENT_SPENT_BY_WATCHER_LOG: &str = "Maker payment is spent by the watcher..."; #[cfg(not(target_arch = "wasm32"))] -pub fn stats_taker_swap_dir(ctx: &MmArc, account_key: &str) -> PathBuf { - ctx.db_root() - .join(account_key) - .join("SWAPS") - .join("STATS") - .join("TAKER") +pub fn stats_taker_swap_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id).join("SWAPS").join("STATS").join("TAKER") } #[cfg(not(target_arch = "wasm32"))] -pub fn stats_taker_swap_file_path(ctx: &MmArc, account_key: &str, uuid: &Uuid) -> PathBuf { - stats_taker_swap_dir(ctx, account_key).join(format!("{}.json", uuid)) +pub fn stats_taker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) -> PathBuf { + stats_taker_swap_dir(ctx, db_id).join(format!("{}.json", uuid)) } async fn save_my_taker_swap_event( ctx: &MmArc, - account_key: &str, + db_id: Option<&str>, swap: &TakerSwap, event: TakerSavedEvent, ) -> Result<(), String> { - let swap = match SavedSwap::load_my_swap_from_db(ctx, account_key, swap.uuid).await { + let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id, swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Taker(TakerSavedSwap { uuid: swap.uuid, @@ -153,7 +149,7 @@ async fn save_my_taker_swap_event( taker_swap.fetch_and_set_usd_prices().await; } let new_swap = 
SavedSwap::Taker(taker_swap); - try_s!(new_swap.save_to_db(ctx, account_key).await); + try_s!(new_swap.save_to_db(ctx, db_id).await); Ok(()) } else { ERR!("Expected SavedSwap::Taker, got {:?}", swap) From c6146d0072949d2a23b3455efd2a02edb788c075 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 9 Apr 2024 22:36:02 +0100 Subject: [PATCH 004/186] more wip changes and start coin pubkey for indexeddb storage --- mm2src/coins/lp_coins.rs | 16 +++++-- mm2src/coins/nft/nft_structs.rs | 2 +- .../wasm/tx_history_storage_v1.rs | 2 +- .../wasm/indexeddb_block_header_storage.rs | 2 +- .../storage/blockdb/blockdb_idb_storage.rs | 2 +- .../z_coin/storage/walletdb/wasm/storage.rs | 4 +- .../z_coin/storage/z_params/indexeddb.rs | 2 +- mm2src/mm2_db/src/indexed_db/db_lock.rs | 19 ++++---- .../mm2_db/src/indexed_db/indexed_cursor.rs | 32 ++++++------- mm2src/mm2_db/src/indexed_db/indexed_db.rs | 46 +++++++++---------- .../src/account/storage/wasm_storage.rs | 2 +- mm2src/mm2_main/src/lp_native_dex.rs | 3 +- mm2src/mm2_main/src/lp_ordermatch.rs | 2 +- mm2src/mm2_main/src/lp_swap.rs | 10 ++-- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 14 +++--- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 30 ++++++++---- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 8 ++-- 17 files changed, 108 insertions(+), 88 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 1974166515..c58660f7ef 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -2966,7 +2966,17 @@ pub trait MmCoin: /// Loop collecting coin transaction history and saving it to local DB fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; - fn account_db_id(&self) -> Option<&str> { None } + #[cfg(not(target_arch = "wasm32"))] + fn account_db_id(&self) -> Option { + if let Ok(key) = self.get_public_key() { + return Some(key); + }; + + None + } + + #[cfg(target_arch = "wasm32")] + fn account_db_id(&self) -> Option { None } /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] @@ -3409,9 +3419,9 @@ impl CoinsContext { scan_addresses_manager: ScanAddressesTaskManager::new_shared(), withdraw_task_manager: WithdrawTaskManager::new_shared(), #[cfg(target_arch = "wasm32")] - tx_history_db: ConstructibleDb::new(ctx).into_shared(), + tx_history_db: ConstructibleDb::new(ctx, None).into_shared(), #[cfg(target_arch = "wasm32")] - hd_wallet_db: ConstructibleDb::new_shared_db(ctx).into_shared(), + hd_wallet_db: ConstructibleDb::new_shared_db(ctx, None).into_shared(), }) }))) } diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index baf6c0c65d..600c95220e 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -740,7 +740,7 @@ impl NftCtx { pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { Ok(NftCtx { - nft_cache_db: ConstructibleDb::new(ctx).into_shared(), + nft_cache_db: ConstructibleDb::new(ctx, None).into_shared(), }) }))) } diff --git a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v1.rs b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v1.rs index c52fefd76d..78639b2f04 100644 --- a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v1.rs +++ b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v1.rs @@ -95,7 +95,7 @@ mod tests { #[wasm_bindgen_test] async fn test_tx_history() { const DB_NAME: &str = "TEST_TX_HISTORY"; - let db = TxHistoryDb::init(DbIdentifier::for_test(DB_NAME)) + let db = TxHistoryDb::init(DbIdentifier::for_test(DB_NAME, None)) .await 
.expect("!TxHistoryDb::init_with_fs_path"); diff --git a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs index 08e1a962c8..b0be84ac7e 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs @@ -49,7 +49,7 @@ pub struct IDBBlockHeadersStorage { impl IDBBlockHeadersStorage { pub fn new(ctx: &MmArc, ticker: String) -> Self { Self { - db: ConstructibleDb::new(ctx).into_shared(), + db: ConstructibleDb::new(ctx, None).into_shared(), ticker, } } diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs index cccf8cc0a9..a057fea80d 100644 --- a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs @@ -69,7 +69,7 @@ impl BlockDbInner { impl BlockDbImpl { pub async fn new(ctx: &MmArc, ticker: String, _path: PathBuf) -> ZcoinStorageRes { Ok(Self { - db: ConstructibleDb::new(ctx).into_shared(), + db: ConstructibleDb::new(ctx, None).into_shared(), ticker, }) } diff --git a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs index bf99dec6ca..ca6cfc40f1 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs @@ -138,7 +138,7 @@ impl<'a> WalletIndexedDb { consensus_params: ZcoinConsensusParams, ) -> MmResult { let db = Self { - db: ConstructibleDb::new(ctx).into_shared(), + db: ConstructibleDb::new(ctx, None).into_shared(), ticker: ticker.to_string(), params: consensus_params, }; @@ -697,7 +697,7 @@ impl WalletIndexedDb { } /// Asynchronously rewinds the storage to a specified block height, effectively - /// removing data beyond the specified height from the storage. + /// removing data beyond the specified height from the storage. pub async fn rewind_to_height(&self, block_height: BlockHeight) -> ZcoinStorageRes<()> { let locked_db = self.lock_db().await?; let db_transaction = locked_db.get_inner().transaction().await?; diff --git a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs index 91a2ec51b4..d513cfce22 100644 --- a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs +++ b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs @@ -70,7 +70,7 @@ pub(crate) struct ZcashParamsWasmImpl(SharedDb); impl ZcashParamsWasmImpl { pub(crate) async fn new(ctx: &MmArc) -> MmResult { - Ok(Self(ConstructibleDb::new(ctx).into_shared())) + Ok(Self(ConstructibleDb::new(ctx, None).into_shared())) } async fn lock_db(&self) -> ZcashParamsWasmRes> { diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 1ca10262f9..33f03ecac6 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -1,7 +1,6 @@ use super::{DbIdentifier, DbInstance, InitDbResult}; use futures::lock::{MappedMutexGuard as AsyncMappedMutexGuard, Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use mm2_core::{mm_ctx::MmArc, DbNamespaceId}; -use primitives::hash::H160; use std::sync::{Arc, Weak}; /// The mapped mutex guard. @@ -14,7 +13,7 @@ pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the mutable reference. 
mutex: AsyncMutex>, db_namespace: DbNamespaceId, - wallet_rmd160: Option, + pubkey: Option, } impl ConstructibleDb { @@ -22,22 +21,26 @@ impl ConstructibleDb { /// Creates a new uninitialized `Db` instance from other Iguana and/or HD accounts. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. - pub fn new(ctx: &MmArc) -> Self { + pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { + let rmd = hex::encode(ctx.rmd160().as_slice()); + let pubkey = db_id.unwrap_or(&rmd); ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - wallet_rmd160: Some(*ctx.rmd160()), + pubkey: Some(pubkey.to_string()), } } /// Creates a new uninitialized `Db` instance shared between Iguana and all HD accounts /// derived from the same passphrase. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. - pub fn new_shared_db(ctx: &MmArc) -> Self { + pub fn new_shared_db(ctx: &MmArc, db_id: Option<&str>) -> Self { + let rmd = hex::encode(ctx.shared_db_id().as_slice()); + let pubkey = db_id.unwrap_or(&rmd); ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - wallet_rmd160: Some(*ctx.shared_db_id()), + pubkey: Some(pubkey.to_string()), } } @@ -47,7 +50,7 @@ impl ConstructibleDb { ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - wallet_rmd160: None, + pubkey: None, } } @@ -60,7 +63,7 @@ impl ConstructibleDb { return Ok(unwrap_db_instance(locked_db)); } - let db_id = DbIdentifier::new::(self.db_namespace, self.wallet_rmd160); + let db_id = DbIdentifier::new::(self.db_namespace, self.pubkey.clone()); let db = Db::init(db_id).await?; *locked_db = Some(db); diff --git a/mm2src/mm2_db/src/indexed_db/indexed_cursor.rs b/mm2src/mm2_db/src/indexed_db/indexed_cursor.rs index 74b7c3e89b..ef28c7bea7 100644 --- a/mm2src/mm2_db/src/indexed_db/indexed_cursor.rs +++ b/mm2src/mm2_db/src/indexed_db/indexed_cursor.rs @@ -388,7 +388,7 @@ mod tests { }) .collect(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -449,7 +449,7 @@ mod tests { swap_item!("uuid6", "QRC20", "RICK", 2, 2, 721), ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -501,7 +501,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -558,7 +558,7 @@ mod tests { swap_item!("uuid12", "tBTC", "RICK", 92, 6, 721), ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -633,7 +633,7 @@ mod tests { swap_item!("uuid25", "DOGE", "tBTC", 9, 10, 711), ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -692,7 +692,7 @@ mod tests { TimestampTable::new(u128::MAX, 2, u64::MAX as u128 + 1), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) 
.with_table::() .build() @@ -743,7 +743,7 @@ mod tests { swap_item!("uuid4", "RICK", "MORTY", 8, 6, 92), ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -789,7 +789,7 @@ mod tests { const DB_NAME: &str = "TEST_REV_ITER_WITHOUT_CONSTRAINTS"; const DB_VERSION: u32 = 1; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -826,7 +826,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -877,7 +877,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -930,7 +930,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -978,7 +978,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1021,7 +1021,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1065,7 +1065,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1111,7 +1111,7 @@ mod tests { swap_item!("uuid6", "KMD", "MORTY", 12, 3124, 214), // + ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1158,7 +1158,7 @@ mod tests { swap_item!("uuid3", "RICK", "FTM", 12, 3124, 214), ]; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() diff --git a/mm2src/mm2_db/src/indexed_db/indexed_db.rs b/mm2src/mm2_db/src/indexed_db/indexed_db.rs index d6e8ab15d4..e292f1f475 100644 --- a/mm2src/mm2_db/src/indexed_db/indexed_db.rs +++ b/mm2src/mm2_db/src/indexed_db/indexed_db.rs @@ -81,39 +81,37 @@ pub trait DbInstance: Sized { } #[derive(Clone, Display)] -#[display(fmt = "{}::{}::{}", namespace_id, "self.display_rmd160()", db_name)] +#[display(fmt = "{}::{}::{}", namespace_id, "self.display_pubkey()", db_name)] pub struct DbIdentifier { namespace_id: DbNamespaceId, - /// The `RIPEMD160(SHA256(x))` where x is secp256k1 pubkey derived from passphrase. 
- /// This value is used to distinguish different databases corresponding to user's different seed phrases. - wallet_rmd160: Option, + /// The pubkey derived from passphrase or coin. + /// This value is used to distinguish different databases corresponding to user's coin activation pubkey or seedphrase. + pubkey: Option, db_name: &'static str, } impl DbIdentifier { pub fn db_name(&self) -> &'static str { self.db_name } - pub fn new(namespace_id: DbNamespaceId, wallet_rmd160: Option) -> DbIdentifier { + pub fn new(namespace_id: DbNamespaceId, pubkey: Option) -> DbIdentifier { + let pubkey = Some(pubkey.unwrap_or_else(|| hex::encode(H160::default().as_slice()))); DbIdentifier { namespace_id, - wallet_rmd160, + pubkey, db_name: Db::DB_NAME, } } - pub fn for_test(db_name: &'static str) -> DbIdentifier { + pub fn for_test(db_name: &'static str, pubkey: Option) -> DbIdentifier { + let pubkey = Some(pubkey.unwrap_or_else(|| hex::encode(H160::default().as_slice()))); DbIdentifier { namespace_id: DbNamespaceId::for_test(), - wallet_rmd160: Some(H160::default()), + pubkey, db_name, } } - pub fn display_rmd160(&self) -> String { - self.wallet_rmd160 - .map(hex::encode) - .unwrap_or_else(|| "KOMODEFI".to_string()) - } + pub fn display_pubkey(&self) -> String { self.pubkey.clone().unwrap_or_else(|| "KOMODEFI".to_string()) } } pub struct IndexedDbBuilder { @@ -978,7 +976,7 @@ mod tests { register_wasm_log(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1048,7 +1046,7 @@ mod tests { register_wasm_log(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1108,7 +1106,7 @@ mod tests { register_wasm_log(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1161,7 +1159,7 @@ mod tests { register_wasm_log(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1238,7 +1236,7 @@ mod tests { register_wasm_log(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1280,7 +1278,7 @@ mod tests { register_wasm_log(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1372,7 +1370,7 @@ mod tests { register_wasm_log(); - let db_identifier = DbIdentifier::for_test(DB_NAME); + let db_identifier = DbIdentifier::for_test(DB_NAME, None); init_and_check(db_identifier.clone(), 1, Some((0, 1))).await.unwrap(); init_and_check(db_identifier.clone(), 2, Some((1, 2))).await.unwrap(); @@ -1386,7 +1384,7 @@ mod tests { const DB_VERSION: u32 = 1; register_wasm_log(); - let db_identifier = DbIdentifier::for_test(DB_NAME); + let db_identifier = DbIdentifier::for_test(DB_NAME, None); let _db = IndexedDbBuilder::new(db_identifier.clone()) .with_version(DB_VERSION) @@ -1414,7 +1412,7 @@ mod tests { const DB_VERSION: u32 = 1; register_wasm_log(); - let db_identifier = 
DbIdentifier::for_test(DB_NAME); + let db_identifier = DbIdentifier::for_test(DB_NAME, None); let db = IndexedDbBuilder::new(db_identifier.clone()) .with_version(DB_VERSION) @@ -1475,7 +1473,7 @@ mod tests { some_data: "Some data 2".to_owned(), }; - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() @@ -1524,7 +1522,7 @@ mod tests { register_wasm_log(); - let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME)) + let db = IndexedDbBuilder::new(DbIdentifier::for_test(DB_NAME, None)) .with_version(DB_VERSION) .with_table::() .build() diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index 1f05370c29..d4b55a15b2 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -63,7 +63,7 @@ pub(crate) struct WasmAccountStorage { impl WasmAccountStorage { pub fn new(ctx: &MmArc) -> Self { WasmAccountStorage { - account_db: ConstructibleDb::new_shared_db(ctx).into_shared(), + account_db: ConstructibleDb::new_shared_db(ctx, None).into_shared(), } } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 7aa7958ebb..7f7714d3c5 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -504,7 +504,8 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes #[cfg(not(target_arch = "wasm32"))] { - let dbdir = ctx.dbdir(); + // Todo: Handle properly + let dbdir = ctx.dbdir(None); fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { path: dbdir.clone(), error: e.to_string(), diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index ba57cfdf69..edf55206f0 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2767,7 +2767,7 @@ pub fn init_ordermatch_context(ctx: &MmArc) -> OrdermatchInitResult<()> { orderbook_tickers, original_tickers, #[cfg(target_arch = "wasm32")] - ordermatch_db: ConstructibleDb::new(ctx), + ordermatch_db: ConstructibleDb::new(ctx, None), }; from_ctx(&ctx.ordermatch_ctx, move || Ok(ordermatch_context)) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 1914b27fd2..af523d105d 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -542,7 +542,7 @@ impl SwapsContext { ))), locked_amounts: Mutex::new(HashMap::new()), #[cfg(target_arch = "wasm32")] - swap_db: ConstructibleDb::new(ctx), + swap_db: ConstructibleDb::new(ctx, None), }) }))) } @@ -997,9 +997,7 @@ pub struct TransactionIdentifier { } #[cfg(not(target_arch = "wasm32"))] -pub fn my_swaps_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { - ctx.dbdir(db_id).join("SWAPS").join("MY") -} +pub fn my_swaps_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { ctx.dbdir(db_id).join("SWAPS").join("MY") } #[cfg(not(target_arch = "wasm32"))] pub fn my_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) -> PathBuf { @@ -1364,8 +1362,8 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { let mut coins = HashSet::new(); let legacy_unfinished_uuids = try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE).await); for uuid in legacy_unfinished_uuids { - let db_id = todo!(); - let swap = match SavedSwap::load_my_swap_from_db(&ctx, db_id, uuid).await { + // Todo 
db_id + let swap = match SavedSwap::load_my_swap_from_db(&ctx, None, uuid).await { Ok(Some(s)) => s, Ok(None) => { warn!("Swap {} is indexed, but doesn't exist in DB", uuid); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 4f42476e2e..a3285d6c52 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -78,10 +78,7 @@ pub const MAKER_PAYMENT_SENT_LOG: &str = "Maker payment sent"; #[cfg(not(target_arch = "wasm32"))] pub fn stats_maker_swap_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { - ctx.dbdir(db_id) - .join("SWAPS") - .join("STATS") - .join("MAKER") + ctx.dbdir(db_id).join("SWAPS").join("STATS").join("MAKER") } #[cfg(not(target_arch = "wasm32"))] @@ -1300,8 +1297,8 @@ impl MakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let account_key = todo!(); - let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key, *swap_uuid).await { + let account_key = maker_coin.account_db_id(); + let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), Err(e) => return ERR!("{}", e), @@ -2100,6 +2097,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.taker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); + let mut swap_fut = Box::pin( async move { let mut events; @@ -2119,8 +2117,8 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { .dispatch_async(ctx.clone(), LpEvents::MakerSwapStatusChanged(event_to_send)) .await; drop(dispatcher); - let account_key = todo!(); - save_my_maker_swap_event(&ctx, account_key, &running_swap, to_save) + let account_key = running_swap.maker_coin.account_db_id().clone(); + save_my_maker_swap_event(&ctx, account_key.as_deref(), &running_swap, to_save) .await .expect("!save_my_maker_swap_event"); if event.should_ban_taker() { diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 1770eaf1ee..0af2c39b3e 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -237,7 +237,10 @@ mod native_impl { Ok(read_json(&path).await?) } - async fn load_all_from_maker_stats_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { + async fn load_all_from_maker_stats_db( + ctx: &MmArc, + db_id: Option<&str>, + ) -> SavedSwapResult> { let path = stats_maker_swap_dir(ctx, db_id); Ok(read_dir_json(&path).await?) } @@ -251,7 +254,10 @@ mod native_impl { Ok(read_json(&path).await?) } - async fn load_all_from_taker_stats_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { + async fn load_all_from_taker_stats_db( + ctx: &MmArc, + db_id: Option<&str>, + ) -> SavedSwapResult> { let path = stats_taker_swap_dir(ctx, db_id); Ok(read_dir_json(&path).await?) 
        }

@@ -394,7 +400,11 @@ mod wasm_impl {

     #[async_trait]
     impl SavedSwapIo for SavedSwap {
-        async fn load_my_swap_from_db(ctx: &MmArc, account_key: &str, uuid: Uuid) -> SavedSwapResult> {
+        async fn load_my_swap_from_db(
+            ctx: &MmArc,
+            _db_id: Option<&str>,
+            uuid: Uuid,
+        ) -> SavedSwapResult> {
             let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?;
             let db = swaps_ctx.swap_db().await?;
             let transaction = db.transaction().await?;
@@ -408,7 +418,7 @@ mod wasm_impl {
             json::from_value(saved_swap_json).map_to_mm(|e| SavedSwapError::ErrorDeserializing(e.to_string()))
         }

-        async fn load_all_my_swaps_from_db(ctx: &MmArc, account_key: &str) -> SavedSwapResult> {
+        async fn load_all_my_swaps_from_db(ctx: &MmArc, _db_id: Option<&str>) -> SavedSwapResult> {
             let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?;
             let db = swaps_ctx.swap_db().await?;
             let transaction = db.transaction().await?;
@@ -423,7 +433,7 @@ mod wasm_impl {
                 .collect()
         }

-        async fn save_to_db(&self, ctx: &MmArc, account_key: &str) -> SavedSwapResult<()> {
+        async fn save_to_db(&self, ctx: &MmArc, _db_id: Option<&str>) -> SavedSwapResult<()> {
             let saved_swap = json::to_value(self).map_to_mm(|e| SavedSwapError::ErrorSerializing(e.to_string()))?;
             let saved_swap_item = SavedSwapTable {
                 uuid: *self.uuid(),
@@ -476,7 +486,8 @@ mod tests {
             saved_swap: json::to_value(&saved_swap).unwrap(),
         };

-        saved_swap.save_to_db(&ctx).await.expect("!save_to_db");
+        let account_id = None;
+        saved_swap.save_to_db(&ctx, account_id).await.expect("!save_to_db");

         let first_item_id = {
             let items = get_all_items(&ctx).await;
@@ -494,7 +505,7 @@ mod tests {
         };
         assert_ne!(first_saved_item, second_saved_item);

-        saved_swap.save_to_db(&ctx).await.expect("!save_to_db");
+        saved_swap.save_to_db(&ctx, account_id).await.expect("!save_to_db");

         {
             let items = get_all_items(&ctx).await;
@@ -504,7 +515,7 @@ mod tests {
             assert_eq!(item, second_saved_item);
         }

-        let actual_saved_swap = SavedSwap::load_my_swap_from_db(&ctx, *saved_swap.uuid())
+        let actual_saved_swap = SavedSwap::load_my_swap_from_db(&ctx, account_id, *saved_swap.uuid())
             .await
             .expect("!load_from_db")
             .expect("Swap not found");
@@ -539,9 +550,10 @@ mod tests {
     async fn test_migrate_swaps_data() {
         let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc();

+        let account_id = None;
         let saved_swap_str =
r#"{"type":"Maker","error_events":["StartFailed","NegotiateFailed","TakerFeeValidateFailed","MakerPaymentTransactionFailed","MakerPaymentDataSendFailed","TakerPaymentValidateFailed","TakerPaymentSpendFailed","TakerPaymentSpendConfirmFailed","MakerPaymentRefunded","MakerPaymentRefundFailed"],"events":[{"event":{"data":{"lock_duration":7800,"maker_amount":"3.54932734","maker_coin":"KMD","maker_coin_start_block":1452970,"maker_payment_confirmations":1,"maker_payment_lock":1563759539,"my_persistent_pub":"031bb83b58ec130e28e0a6d5d2acf2eb01b0d3f1670e021d47d31db8a858219da8","secret":"e1c9bd12a83f810813dc078ac398069b63d56bf1e94657def995c43cd1975302","started_at":1563743939,"taker":"101ace6b08605b9424b0582b5cce044b70a3c8d8d10cb2965e039b0967ae92b9","taker_amount":"0.02004833998671660000000000","taker_coin":"ETH","taker_coin_start_block":8196380,"taker_payment_confirmations":1,"uuid":"3447b727-fe93-4357-8e5a-8cf2699b7e86"},"type":"Started"},"timestamp":1563743939211},{"event":{"data":{"taker_payment_locktime":1563751737,"taker_pubkey":"03101ace6b08605b9424b0582b5cce044b70a3c8d8d10cb2965e039b0967ae92b9"},"type":"Negotiated"},"timestamp":1563743979835},{"event":{"data":{"tx_hash":"a59203eb2328827de00bed699a29389792906e4f39fdea145eb40dc6b3821bd6","tx_hex":"f8690284ee6b280082520894d8997941dd1346e9231118d5685d866294f59e5b865af3107a4000801ca0743d2b7c9fad65805d882179062012261be328d7628ae12ee08eff8d7657d993a07eecbd051f49d35279416778faa4664962726d516ce65e18755c9b9406a9c2fd"},"type":"TakerFeeValidated"},"timestamp":1563744052878},{"event":{"data":{"error":"lp_swap:1888] eth:654] RPC error: Error { code: ServerError(-32010), message: \"Transaction with the same hash was already imported.\", data: None }"},"type":"MakerPaymentTransactionFailed"},"timestamp":1563744118577},{"event":{"type":"Finished"},"timestamp":1563763243350}],"success_events":["Started","Negotiated","TakerFeeValidated","MakerPaymentSent","TakerPaymentReceived","TakerPaymentWaitConfirmStarted","TakerPaymentValidatedAndConfirmed","TakerPaymentSpent","TakerPaymentSpendConfirmStarted","TakerPaymentSpendConfirmed","TakerPaymentSpendConfirmStarted","TakerPaymentSpendConfirmed","Finished"],"uuid":"3447b727-fe93-4357-8e5a-8cf2699b7e86"}"#; let saved_swap: SavedSwap = json::from_str(saved_swap_str).unwrap(); - saved_swap.save_to_db(&ctx).await.expect("!save_to_db"); + saved_swap.save_to_db(&ctx, account_id).await.expect("!save_to_db"); let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 645b874aa7..ca7ab51302 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -466,8 +466,8 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { event: event.clone(), }; - let account_key = todo!(); - save_my_taker_swap_event(&ctx, account_key, &running_swap, to_save) + let account_key = Some(running_swap.my_persistent_pub.to_string()); + save_my_taker_swap_event(&ctx, account_key.as_deref(), &running_swap, to_save) .await .expect("!save_my_taker_swap_event"); if event.should_ban_maker() { @@ -1957,9 +1957,9 @@ impl TakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let account_key = todo!(); + let account_key = taker_coin.account_db_id(); - let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key, *swap_uuid).await { + let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { Ok(Some(saved)) => 
saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), Err(e) => return ERR!("{}", e), From eff374c149d728a666630a5aa07b58fe934c1542 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 9 Apr 2024 23:50:56 +0100 Subject: [PATCH 005/186] Z coin or default pubkey for initializing blocksdb --- mm2src/coins/z_coin.rs | 7 ++++--- .../storage/blockdb/blockdb_idb_storage.rs | 4 ++-- .../storage/blockdb/blockdb_sql_storage.rs | 9 +++++++-- mm2src/coins/z_coin/storage/blockdb/mod.rs | 6 +++--- .../coins/z_coin/storage/walletdb/wasm/mod.rs | 18 +++++++++--------- 5 files changed, 25 insertions(+), 19 deletions(-) diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index 2f526b48a2..bbece49cc6 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -889,7 +889,8 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { &my_z_addr, ); - let blocks_db = self.init_blocks_db().await?; + let db_id = Some(my_z_addr_encoded.clone()); + let blocks_db = self.init_blocks_db(db_id.as_deref()).await?; let (z_balance_event_sender, z_balance_event_handler) = if self.ctx.event_stream_configuration.is_some() { let (sender, receiver) = futures::channel::mpsc::unbounded(); (Some(sender), Some(Arc::new(AsyncMutex::new(receiver)))) @@ -997,12 +998,12 @@ impl<'a> ZCoinBuilder<'a> { } } - async fn init_blocks_db(&self) -> Result> { + async fn init_blocks_db(&self, db_id: Option<&str>) -> Result> { let cache_db_path = self.db_dir_path.join(format!("{}_cache.db", self.ticker)); let ctx = self.ctx.clone(); let ticker = self.ticker.to_string(); - BlockDbImpl::new(&ctx, ticker, cache_db_path) + BlockDbImpl::new(&ctx, ticker, cache_db_path, db_id) .map_err(|err| MmError::new(ZcoinClientInitError::ZcoinStorageError(err.to_string()))) .await } diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs index a057fea80d..1265e5b6b1 100644 --- a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs @@ -67,9 +67,9 @@ impl BlockDbInner { } impl BlockDbImpl { - pub async fn new(ctx: &MmArc, ticker: String, _path: PathBuf) -> ZcoinStorageRes { + pub async fn new(ctx: &MmArc, ticker: String, _path: PathBuf, db_id: Option<&str>) -> ZcoinStorageRes { Ok(Self { - db: ConstructibleDb::new(ctx, None).into_shared(), + db: ConstructibleDb::new(ctx, db_id).into_shared(), ticker, }) } diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs index e8ce70d6dd..2de560c3c5 100644 --- a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs @@ -45,7 +45,7 @@ impl From> for ZcoinStorageError { impl BlockDbImpl { #[cfg(all(not(test)))] - pub async fn new(_ctx: &MmArc, ticker: String, path: PathBuf) -> ZcoinStorageRes { + pub async fn new(_ctx: &MmArc, ticker: String, path: PathBuf, _db_id: Option<&str>) -> ZcoinStorageRes { async_blocking(move || { let conn = Connection::open(path).map_to_mm(|err| ZcoinStorageError::DbError(err.to_string()))?; let conn = Arc::new(Mutex::new(conn)); @@ -68,7 +68,12 @@ impl BlockDbImpl { } #[cfg(all(test))] - pub(crate) async fn new(ctx: &MmArc, ticker: String, _path: PathBuf) -> ZcoinStorageRes { + pub(crate) async fn new( + ctx: &MmArc, + ticker: String, + _path: PathBuf, + _db_id: Option<&str>, + ) -> ZcoinStorageRes { let ctx = ctx.clone(); async_blocking(move || { let conn = ctx diff 
--git a/mm2src/coins/z_coin/storage/blockdb/mod.rs b/mm2src/coins/z_coin/storage/blockdb/mod.rs index 7e2ef49fe7..e79043d207 100644 --- a/mm2src/coins/z_coin/storage/blockdb/mod.rs +++ b/mm2src/coins/z_coin/storage/blockdb/mod.rs @@ -38,7 +38,7 @@ mod block_db_storage_tests { pub(crate) async fn test_insert_block_and_get_latest_block_impl() { let ctx = mm_ctx_with_custom_db(); - let db = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let db = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); // insert block @@ -53,7 +53,7 @@ mod block_db_storage_tests { pub(crate) async fn test_rewind_to_height_impl() { let ctx = mm_ctx_with_custom_db(); - let db = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let db = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); // insert block @@ -78,7 +78,7 @@ mod block_db_storage_tests { #[allow(unused)] pub(crate) async fn test_process_blocks_with_mode_impl() { let ctx = mm_ctx_with_custom_db(); - let db = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let db = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); // insert block diff --git a/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs b/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs index ff35564385..f1f2bd6cde 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs @@ -205,7 +205,7 @@ mod wasm_test { async fn test_valid_chain_state() { // init blocks_db let ctx = mm_ctx_with_custom_db(); - let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); @@ -313,7 +313,7 @@ mod wasm_test { async fn invalid_chain_cache_disconnected() { // init blocks_db let ctx = mm_ctx_with_custom_db(); - let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); @@ -402,7 +402,7 @@ mod wasm_test { async fn test_invalid_chain_reorg() { // init blocks_db let ctx = mm_ctx_with_custom_db(); - let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); @@ -491,7 +491,7 @@ mod wasm_test { async fn test_data_db_rewinding() { // init blocks_db let ctx = mm_ctx_with_custom_db(); - let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); @@ -557,7 +557,7 @@ mod wasm_test { async fn test_scan_cached_blocks_requires_sequential_blocks() { // init blocks_db let ctx = mm_ctx_with_custom_db(); - let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); @@ -625,7 +625,7 @@ mod wasm_test { async fn test_scan_cached_blokcs_finds_received_notes() { // init blocks_db let ctx = mm_ctx_with_custom_db(); - let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); @@ -678,7 +678,7 @@ mod wasm_test { async fn test_scan_cached_blocks_finds_change_notes() { // init blocks_db let ctx = mm_ctx_with_custom_db(); - let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()) + let 
blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None) .await .unwrap(); @@ -744,7 +744,7 @@ mod wasm_test { // async fn create_to_address_fails_on_unverified_notes() { // // init blocks_db // let ctx = mm_ctx_with_custom_db(); - // let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()).await.unwrap(); + // let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None).await.unwrap(); // // // init walletdb. // let mut walletdb = wallet_db_from_zcoin_builder_for_test(&ctx, TICKER).await; @@ -1013,7 +1013,7 @@ mod wasm_test { // // // init blocks_db // let ctx = mm_ctx_with_custom_db(); - // let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new()).await.unwrap(); + // let blockdb = BlockDbImpl::new(&ctx, TICKER.to_string(), PathBuf::new(), None).await.unwrap(); // // // init walletdb. // let mut walletdb = wallet_db_from_zcoin_builder_for_test(&ctx, TICKER).await; From fef8a4ecc0782f2859aa42bb450249d2cf7d3b12 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 15:01:24 +0100 Subject: [PATCH 006/186] zcoin - allow db_id for walletdb --- mm2src/coins/z_coin.rs | 5 +++-- .../z_coin/storage/walletdb/wallet_sql_storage.rs | 3 +++ mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs | 4 +++- .../coins/z_coin/storage/walletdb/wasm/storage.rs | 6 ++++-- mm2src/coins/z_coin/z_rpc.rs | 14 +++++++++++--- 5 files changed, 24 insertions(+), 8 deletions(-) diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index bbece49cc6..43273a07a2 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -889,8 +889,9 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { &my_z_addr, ); - let db_id = Some(my_z_addr_encoded.clone()); - let blocks_db = self.init_blocks_db(db_id.as_deref()).await?; + // TODO: db_id + let blocks_db = self.init_blocks_db(None).await?; + let (z_balance_event_sender, z_balance_event_handler) = if self.ctx.event_stream_configuration.is_some() { let (sender, receiver) = futures::channel::mpsc::unbounded(); (Some(sender), Some(Arc::new(AsyncMutex::new(receiver)))) diff --git a/mm2src/coins/z_coin/storage/walletdb/wallet_sql_storage.rs b/mm2src/coins/z_coin/storage/walletdb/wallet_sql_storage.rs index 3a957d375f..4cd1c95187 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wallet_sql_storage.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wallet_sql_storage.rs @@ -23,6 +23,7 @@ pub async fn create_wallet_db( checkpoint_block: Option, evk: ExtendedFullViewingKey, continue_from_prev_sync: bool, + _db_id: Option<&str>, ) -> Result, MmError> { let db = async_blocking(move || { WalletDbAsync::for_path(wallet_db_path, consensus_params) @@ -83,6 +84,7 @@ impl<'a> WalletDbShared { checkpoint_block: Option, z_spending_key: &ExtendedSpendingKey, continue_from_prev_sync: bool, + db_id: Option<&str>, ) -> ZcoinStorageRes { let ticker = builder.ticker; let consensus_params = builder.protocol_info.consensus_params.clone(); @@ -92,6 +94,7 @@ impl<'a> WalletDbShared { checkpoint_block, ExtendedFullViewingKey::from(z_spending_key), continue_from_prev_sync, + db_id, ) .await .map_err(|err| ZcoinStorageError::InitDbError { diff --git a/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs b/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs index f1f2bd6cde..6b2a2275dd 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wasm/mod.rs @@ -127,7 +127,9 @@ mod wasm_test { } async fn wallet_db_from_zcoin_builder_for_test(ctx: &MmArc, ticker: &str) -> WalletIndexedDb { - 
WalletIndexedDb::new(ctx, ticker, consensus_params()).await.unwrap() + WalletIndexedDb::new(ctx, ticker, consensus_params(), None) + .await + .unwrap() } #[wasm_bindgen_test] diff --git a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs index ca6cfc40f1..1811b1e790 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs @@ -56,10 +56,11 @@ impl<'a> WalletDbShared { checkpoint_block: Option, z_spending_key: &ExtendedSpendingKey, continue_from_prev_sync: bool, + db_id: Option<&str>, ) -> ZcoinStorageRes { let ticker = builder.ticker; let consensus_params = builder.protocol_info.consensus_params.clone(); - let db = WalletIndexedDb::new(builder.ctx, ticker, consensus_params).await?; + let db = WalletIndexedDb::new(builder.ctx, ticker, consensus_params, db_id).await?; let extrema = db.block_height_extrema().await?; let get_evk = db.get_extended_full_viewing_keys().await?; let evk = ExtendedFullViewingKey::from(z_spending_key); @@ -136,9 +137,10 @@ impl<'a> WalletIndexedDb { ctx: &MmArc, ticker: &str, consensus_params: ZcoinConsensusParams, + db_id: Option<&str>, ) -> MmResult { let db = Self { - db: ConstructibleDb::new(ctx, None).into_shared(), + db: ConstructibleDb::new(ctx, db_id).into_shared(), ticker: ticker.to_string(), params: consensus_params, }; diff --git a/mm2src/coins/z_coin/z_rpc.rs b/mm2src/coins/z_coin/z_rpc.rs index 8807e6bb82..8c8ee4347a 100644 --- a/mm2src/coins/z_coin/z_rpc.rs +++ b/mm2src/coins/z_coin/z_rpc.rs @@ -541,8 +541,15 @@ pub(super) async fn init_light_client<'a>( // check if no sync_params was provided and continue syncing from last height in db if it's > 0 or skip_sync_params is true. let continue_from_prev_sync = (min_height > 0 && sync_params.is_none()) || (skip_sync_params && min_height < sapling_activation_height); - let wallet_db = - WalletDbShared::new(builder, maybe_checkpoint_block, z_spending_key, continue_from_prev_sync).await?; + // TODO: db_id + let wallet_db = WalletDbShared::new( + builder, + maybe_checkpoint_block, + z_spending_key, + continue_from_prev_sync, + None, + ) + .await?; // Check min_height in blocks_db and rewind blocks_db to 0 if sync_height != min_height if !continue_from_prev_sync && (sync_height != min_height) { // let user know we're clearing cache and re-syncing from new provided height. 
@@ -601,7 +608,8 @@ pub(super) async fn init_native_client<'a>( is_pre_sapling: false, actual: checkpoint_height, }; - let wallet_db = WalletDbShared::new(builder, checkpoint_block, z_spending_key, true) + // TODO: db_id + let wallet_db = WalletDbShared::new(builder, checkpoint_block, z_spending_key, true, None) .await .mm_err(|err| ZcoinClientInitError::ZcoinStorageError(err.to_string()))?; From 624776603749487976185571552dbe924faf6e73 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 16:07:28 +0100 Subject: [PATCH 007/186] =?UTF-8?q?wip=20=E2=80=94=20=20SwapsContext?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- mm2src/mm2_main/src/database.rs | 4 ++- mm2src/mm2_main/src/database/my_swaps.rs | 13 +++++-- mm2src/mm2_main/src/database/stats_swaps.rs | 13 +++++-- mm2src/mm2_main/src/lp_swap.rs | 34 ++++++++++++------- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 2 +- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 13 ++++--- .../mm2_main/src/lp_swap/my_swaps_storage.rs | 6 ++-- mm2src/mm2_main/src/lp_swap/pubkey_banning.rs | 15 +++++--- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 26 +++++++------- mm2src/mm2_main/src/lp_swap/swap_lock.rs | 12 ++++--- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 22 +++++++----- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 9 +++-- mm2src/mm2_main/src/lp_swap/swap_watcher.rs | 3 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 2 +- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 12 ++++--- 15 files changed, 121 insertions(+), 65 deletions(-) diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index cdaa5f94cc..4ffe035546 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -72,7 +72,9 @@ fn clean_db(ctx: &MmArc) { } } -async fn migration_1(ctx: &MmArc) -> Vec<(&'static str, Vec)> { fill_my_swaps_from_json_statements(ctx, None).await } +async fn migration_1(ctx: &MmArc) -> Vec<(&'static str, Vec)> { + fill_my_swaps_from_json_statements(ctx, None).await +} async fn migration_2(ctx: &MmArc) -> Vec<(&'static str, Vec)> { create_and_fill_stats_swaps_from_json_statements(ctx, None).await diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 612f766f75..fc0a1f94eb 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -129,7 +129,9 @@ pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)]) -> SqlResu /// Returns SQL statements to initially fill my_swaps table using existing DB with JSON files /// Use this only in migration code! 
pub async fn fill_my_swaps_from_json_statements(ctx: &MmArc, db_id: Option<&str>) -> Vec<(&'static str, Vec)> { - let swaps = SavedSwap::load_all_my_swaps_from_db(ctx, db_id).await.unwrap_or_default(); + let swaps = SavedSwap::load_all_my_swaps_from_db(ctx, db_id) + .await + .unwrap_or_default(); swaps .into_iter() .filter_map(insert_saved_swap_sql_migration_1) @@ -343,8 +345,13 @@ WHERE uuid = :uuid; "#; /// Returns SQL statements to set is_finished to 1 for completed legacy swaps -pub async fn set_is_finished_for_legacy_swaps_statements(ctx: &MmArc, db_id: Option<&str>) -> Vec<(&'static str, Vec)> { - let swaps = SavedSwap::load_all_my_swaps_from_db(ctx, db_id).await.unwrap_or_default(); +pub async fn set_is_finished_for_legacy_swaps_statements( + ctx: &MmArc, + db_id: Option<&str>, +) -> Vec<(&'static str, Vec)> { + let swaps = SavedSwap::load_all_my_swaps_from_db(ctx, db_id) + .await + .unwrap_or_default(); swaps .into_iter() .filter_map(|swap| { diff --git a/mm2src/mm2_main/src/database/stats_swaps.rs b/mm2src/mm2_main/src/database/stats_swaps.rs index cca86463a3..e800a3cb0e 100644 --- a/mm2src/mm2_main/src/database/stats_swaps.rs +++ b/mm2src/mm2_main/src/database/stats_swaps.rs @@ -97,9 +97,16 @@ pub const ADD_MAKER_TAKER_GUI_AND_VERSION: &[&str] = &[ pub const SELECT_ID_BY_UUID: &str = "SELECT id FROM stats_swaps WHERE uuid = ?1"; /// Returns SQL statements to initially fill stats_swaps table using existing DB with JSON files -pub async fn create_and_fill_stats_swaps_from_json_statements(ctx: &MmArc, db_id: Option<&str>) -> Vec<(&'static str, Vec)> { - let maker_swaps = SavedSwap::load_all_from_maker_stats_db(ctx, db_id).await.unwrap_or_default(); - let taker_swaps = SavedSwap::load_all_from_taker_stats_db(ctx, db_id).await.unwrap_or_default(); +pub async fn create_and_fill_stats_swaps_from_json_statements( + ctx: &MmArc, + db_id: Option<&str>, +) -> Vec<(&'static str, Vec)> { + let maker_swaps = SavedSwap::load_all_from_maker_stats_db(ctx, db_id) + .await + .unwrap_or_default(); + let taker_swaps = SavedSwap::load_all_from_taker_stats_db(ctx, db_id) + .await + .unwrap_or_default(); let mut result = vec![(CREATE_STATS_SWAPS_TABLE, vec![])]; let mut inserted_maker_uuids = HashSet::with_capacity(maker_swaps.len()); diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index af523d105d..75dba0ab7f 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -359,7 +359,8 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest }; debug!("Processing swap msg {:?} for uuid {}", msg, uuid); - let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + let db_dir = None; + let swap_ctx = SwapsContext::from_ctx(&ctx, db_dir).unwrap(); let mut msgs = swap_ctx.swap_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(&uuid) { if msg_store.accept_only_from.bytes == msg.2.unprefixed() { @@ -402,7 +403,8 @@ async fn recv_swap_msg( let wait_until = started + timeout; loop { Timer::sleep(1.).await; - let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let mut msgs = swap_ctx.swap_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(uuid) { if let Some(msg) = getter(msg_store) { @@ -529,7 +531,8 @@ struct SwapsContext { impl SwapsContext { /// Obtains a reference to this crate context, creating it if necessary. 
- fn from_ctx(ctx: &MmArc) -> Result, String> { + #[allow(unused_variables)] + fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { Ok(try_s!(from_ctx(&ctx.swaps_ctx, move || { Ok(SwapsContext { running_swaps: Mutex::new(vec![]), @@ -542,7 +545,7 @@ impl SwapsContext { ))), locked_amounts: Mutex::new(HashMap::new()), #[cfg(target_arch = "wasm32")] - swap_db: ConstructibleDb::new(ctx, None), + swap_db: ConstructibleDb::new(ctx, db_id), }) }))) } @@ -614,7 +617,8 @@ pub async fn get_locked_amount_rpc( /// Get total amount of selected coin locked by all currently ongoing swaps pub fn get_locked_amount(ctx: &MmArc, coin: &str) -> MmNumber { - let swap_ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let swap_lock = swap_ctx.running_swaps.lock().unwrap(); let mut locked = swap_lock @@ -654,7 +658,8 @@ pub fn get_locked_amount(ctx: &MmArc, coin: &str) -> MmNumber { /// Get number of currently running swaps pub fn running_swaps_num(ctx: &MmArc) -> u64 { - let swap_ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let swaps = swap_ctx.running_swaps.lock().unwrap(); swaps.iter().fold(0, |total, swap| match swap.upgrade() { Some(_) => total + 1, @@ -664,7 +669,8 @@ pub fn running_swaps_num(ctx: &MmArc) -> u64 { /// Get total amount of selected coin locked by all currently ongoing swaps except the one with selected uuid fn get_locked_amount_by_other_swaps(ctx: &MmArc, except_uuid: &Uuid, coin: &str) -> MmNumber { - let swap_ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let swap_lock = swap_ctx.running_swaps.lock().unwrap(); swap_lock @@ -686,7 +692,8 @@ fn get_locked_amount_by_other_swaps(ctx: &MmArc, except_uuid: &Uuid, coin: &str) } pub fn active_swaps_using_coins(ctx: &MmArc, coins: &HashSet) -> Result, String> { - let swap_ctx = try_s!(SwapsContext::from_ctx(ctx)); + // TODO: db_id + let swap_ctx = try_s!(SwapsContext::from_ctx(ctx, None)); let swaps = try_s!(swap_ctx.running_swaps.lock()); let mut uuids = vec![]; for swap in swaps.iter() { @@ -708,7 +715,8 @@ pub fn active_swaps_using_coins(ctx: &MmArc, coins: &HashSet) -> Result< } pub fn active_swaps(ctx: &MmArc) -> Result, String> { - let swap_ctx = try_s!(SwapsContext::from_ctx(ctx)); + // TODO: db_id + let swap_ctx = try_s!(SwapsContext::from_ctx(ctx, None)); let swaps = swap_ctx.running_swaps.lock().unwrap(); let mut uuids = vec![]; for swap in swaps.iter() { @@ -1357,7 +1365,7 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result Result, String> { #[cfg(target_arch = "wasm32")] - try_s!(migrate_swaps_data(&ctx).await); + try_s!(migrate_swaps_data(&ctx, None).await); let mut coins = HashSet::new(); let legacy_unfinished_uuids = try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE).await); @@ -1747,7 +1755,8 @@ pub fn process_swap_v2_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PProcessRes let uuid = Uuid::from_str(topic).map_to_mm(|e| P2PProcessError::DecodeError(e.to_string()))?; - let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let mut msgs = swap_ctx.swap_v2_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(&uuid) { let signed_message = SignedMessage::decode(msg).map_to_mm(|e| P2PProcessError::DecodeError(e.to_string()))?; @@ -1814,7 +1823,8 @@ async fn recv_swap_v2_msg( let wait_until 
= started + timeout; loop { Timer::sleep(1.).await; - let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let mut msgs = swap_ctx.swap_v2_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(uuid) { if let Some(msg) = getter(msg_store) { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index a3285d6c52..3b0fa54d38 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -2094,7 +2094,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { } let running_swap = Arc::new(swap); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.maker_coin.account_db_id().as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.taker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index 77ce092be1..a784abb937 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -179,7 +179,8 @@ impl StateMachineStorage for MakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, None).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; @@ -682,7 +683,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let maker_coin_ticker: String = self.maker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -701,7 +703,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let ticker = self.maker_coin.ticker(); if let Some(maker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { maker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); @@ -735,7 +738,9 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); + // TODO: db_id + let swaps_ctx = + SwapsContext::from_ctx(&self.ctx, None).expect("from_ctx should not fail at this point"); let maker_coin_ticker: String = self.maker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 33640350aa..70efedb108 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -176,7 +176,8 @@ mod wasm_impl { started_at: u64, swap_type: u8, ) -> MySwapsResult<()> { - let swap_ctx = SwapsContext::from_ctx(&self.ctx).map_to_mm(MySwapsError::InternalError)?; + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(&self.ctx, 
None).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db().await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; @@ -198,7 +199,8 @@ mod wasm_impl { filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, ) -> MySwapsResult { - let swap_ctx = SwapsContext::from_ctx(&self.ctx).map_to_mm(MySwapsError::InternalError)?; + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(&self.ctx, None).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db().await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; diff --git a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs index 5aa8f94103..11065cb704 100644 --- a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs +++ b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs @@ -21,7 +21,8 @@ pub enum BanReason { } pub fn ban_pubkey_on_failed_swap(ctx: &MmArc, pubkey: H256, swap_uuid: &Uuid, event: SwapEvent) { - let ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let mut banned = ctx.banned_pubkeys.lock().unwrap(); banned.insert(pubkey.into(), BanReason::FailedSwap { caused_by_swap: *swap_uuid, @@ -30,13 +31,15 @@ pub fn ban_pubkey_on_failed_swap(ctx: &MmArc, pubkey: H256, swap_uuid: &Uuid, ev } pub fn is_pubkey_banned(ctx: &MmArc, pubkey: &H256Json) -> bool { - let ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let banned = ctx.banned_pubkeys.lock().unwrap(); banned.contains_key(pubkey) } pub async fn list_banned_pubkeys_rpc(ctx: MmArc) -> Result>, String> { - let ctx = try_s!(SwapsContext::from_ctx(&ctx)); + // TODO: db_id + let ctx = try_s!(SwapsContext::from_ctx(&ctx, None)); let res = try_s!(json::to_vec(&json!({ "result": *try_s!(ctx.banned_pubkeys.lock()), }))); @@ -51,7 +54,8 @@ struct BanPubkeysReq { pub async fn ban_pubkey_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: BanPubkeysReq = try_s!(json::from_value(req)); - let ctx = try_s!(SwapsContext::from_ctx(&ctx)); + // TODO: db_id + let ctx = try_s!(SwapsContext::from_ctx(&ctx, None)); let mut banned_pubs = try_s!(ctx.banned_pubkeys.lock()); match banned_pubs.entry(req.pubkey) { @@ -75,7 +79,8 @@ enum UnbanPubkeysReq { pub async fn unban_pubkeys_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: UnbanPubkeysReq = try_s!(json::from_value(req["unban_by"].clone())); - let ctx = try_s!(SwapsContext::from_ctx(&ctx)); + // TODO: db_id + let ctx = try_s!(SwapsContext::from_ctx(&ctx, None)); let mut banned_pubs = try_s!(ctx.banned_pubkeys.lock()); let mut unbanned = HashMap::new(); let mut were_not_banned = vec![]; diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 0af2c39b3e..467b551d07 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -313,8 +313,8 @@ mod wasm_impl { Ok(migrations.first().map(|(_, m)| m.migration).unwrap_or_default()) } - pub async fn migrate_swaps_data(ctx: &MmArc) -> MmResult<(), SavedSwapError> { - let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; + pub async fn migrate_swaps_data(ctx: &MmArc, db_id: Option<&str>) -> MmResult<(), SavedSwapError> { + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; 
let migration_table = transaction.table::().await?; @@ -402,10 +402,10 @@ mod wasm_impl { impl SavedSwapIo for SavedSwap { async fn load_my_swap_from_db( ctx: &MmArc, - _db_id: Option<&str>, + db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult> { - let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -418,8 +418,8 @@ mod wasm_impl { json::from_value(saved_swap_json).map_to_mm(|e| SavedSwapError::ErrorDeserializing(e.to_string())) } - async fn load_all_my_swaps_from_db(ctx: &MmArc, _db_id: Option<&str>) -> SavedSwapResult> { - let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; + async fn load_all_my_swaps_from_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -433,14 +433,14 @@ mod wasm_impl { .collect() } - async fn save_to_db(&self, ctx: &MmArc, _db_id: Option<&str>) -> SavedSwapResult<()> { + async fn save_to_db(&self, ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult<()> { let saved_swap = json::to_value(self).map_to_mm(|e| SavedSwapError::ErrorSerializing(e.to_string()))?; let saved_swap_item = SavedSwapTable { uuid: *self.uuid(), saved_swap, }; - let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -465,7 +465,7 @@ mod tests { wasm_bindgen_test_configure!(run_in_browser); async fn get_all_items(ctx: &MmArc) -> Vec<(ItemId, SavedSwapTable)> { - let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction @@ -530,7 +530,7 @@ mod tests { async fn test_get_current_migration() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction @@ -550,12 +550,12 @@ mod tests { async fn test_migrate_swaps_data() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let account_id = None; let saved_swap_str = 
r#"{"type":"Maker","error_events":["StartFailed","NegotiateFailed","TakerFeeValidateFailed","MakerPaymentTransactionFailed","MakerPaymentDataSendFailed","TakerPaymentValidateFailed","TakerPaymentSpendFailed","TakerPaymentSpendConfirmFailed","MakerPaymentRefunded","MakerPaymentRefundFailed"],"events":[{"event":{"data":{"lock_duration":7800,"maker_amount":"3.54932734","maker_coin":"KMD","maker_coin_start_block":1452970,"maker_payment_confirmations":1,"maker_payment_lock":1563759539,"my_persistent_pub":"031bb83b58ec130e28e0a6d5d2acf2eb01b0d3f1670e021d47d31db8a858219da8","secret":"e1c9bd12a83f810813dc078ac398069b63d56bf1e94657def995c43cd1975302","started_at":1563743939,"taker":"101ace6b08605b9424b0582b5cce044b70a3c8d8d10cb2965e039b0967ae92b9","taker_amount":"0.02004833998671660000000000","taker_coin":"ETH","taker_coin_start_block":8196380,"taker_payment_confirmations":1,"uuid":"3447b727-fe93-4357-8e5a-8cf2699b7e86"},"type":"Started"},"timestamp":1563743939211},{"event":{"data":{"taker_payment_locktime":1563751737,"taker_pubkey":"03101ace6b08605b9424b0582b5cce044b70a3c8d8d10cb2965e039b0967ae92b9"},"type":"Negotiated"},"timestamp":1563743979835},{"event":{"data":{"tx_hash":"a59203eb2328827de00bed699a29389792906e4f39fdea145eb40dc6b3821bd6","tx_hex":"f8690284ee6b280082520894d8997941dd1346e9231118d5685d866294f59e5b865af3107a4000801ca0743d2b7c9fad65805d882179062012261be328d7628ae12ee08eff8d7657d993a07eecbd051f49d35279416778faa4664962726d516ce65e18755c9b9406a9c2fd"},"type":"TakerFeeValidated"},"timestamp":1563744052878},{"event":{"data":{"error":"lp_swap:1888] eth:654] RPC error: Error { code: ServerError(-32010), message: \"Transaction with the same hash was already imported.\", data: None }"},"type":"MakerPaymentTransactionFailed"},"timestamp":1563744118577},{"event":{"type":"Finished"},"timestamp":1563763243350}],"success_events":["Started","Negotiated","TakerFeeValidated","MakerPaymentSent","TakerPaymentReceived","TakerPaymentWaitConfirmStarted","TakerPaymentValidatedAndConfirmed","TakerPaymentSpent","TakerPaymentSpendConfirmStarted","TakerPaymentSpendConfirmed","TakerPaymentSpendConfirmStarted","TakerPaymentSpendConfirmed","Finished"],"uuid":"3447b727-fe93-4357-8e5a-8cf2699b7e86"}"#; let saved_swap: SavedSwap = json::from_str(saved_swap_str).unwrap(); + let account_id = None; saved_swap.save_to_db(&ctx, account_id).await.expect("!save_to_db"); - let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(&ctx, account_id).unwrap(); { let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); @@ -575,7 +575,7 @@ mod tests { table.add_item(&filter).await.unwrap(); } - wasm_impl::migrate_swaps_data(&ctx).await.unwrap(); + wasm_impl::migrate_swaps_data(&ctx, account_id).await.unwrap(); let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); diff --git a/mm2src/mm2_main/src/lp_swap/swap_lock.rs b/mm2src/mm2_main/src/lp_swap/swap_lock.rs index 29b053db51..9f6e4673e1 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_lock.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_lock.rs @@ -127,7 +127,8 @@ mod wasm_lock { #[async_trait] impl SwapLockOps for SwapLock { async fn lock(ctx: &MmArc, uuid: Uuid, ttl_sec: f64) -> SwapLockResult> { - let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SwapLockError::InternalError)?; + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(ctx, 
None).map_to_mm(SwapLockError::InternalError)?; let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -157,7 +158,8 @@ mod wasm_lock { } async fn touch(&self) -> SwapLockResult<()> { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).map_to_mm(SwapLockError::InternalError)?; + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, None).map_to_mm(SwapLockError::InternalError)?; let db = swaps_ctx.swap_db().await?; let item = SwapLockTable { @@ -180,7 +182,8 @@ mod wasm_lock { impl SwapLock { async fn release(ctx: MmArc, record_id: ItemId) -> SwapLockResult<()> { - let swaps_ctx = SwapsContext::from_ctx(&ctx).map_to_mm(SwapLockError::InternalError)?; + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&ctx, None).map_to_mm(SwapLockError::InternalError)?; let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -206,7 +209,8 @@ mod tests { wasm_bindgen_test_configure!(run_in_browser); async fn get_all_items(ctx: &MmArc) -> Vec<(ItemId, SwapLockTable)> { - let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction.table::().await.expect("Error opening table"); diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index ec87e9b79b..c5a0f15e82 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -105,7 +105,8 @@ pub(super) async fn has_db_record_for(ctx: MmArc, id: &Uuid) -> MmResult MmResult { - let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -141,7 +142,8 @@ pub(super) async fn store_swap_event MmResult<(), SwapStateMachineError> { - let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -164,7 +166,8 @@ pub(super) async fn store_swap_event(ctx: &MmArc, id: Uuid) -> MmResult { - let swaps_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(ctx, None).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; @@ -198,8 +201,8 @@ pub(super) async fn get_unfinished_swaps_uuids( let index = MultiIndex::new(IS_FINISHED_SWAP_TYPE_INDEX) .with_value(BoolAsInt::new(false))? 
.with_value(swap_type)?; - - let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -215,7 +218,8 @@ pub(super) async fn mark_swap_as_finished(ctx: MmArc, id: Uuid) -> MmResult<(), #[cfg(target_arch = "wasm32")] pub(super) async fn mark_swap_as_finished(ctx: MmArc, id: Uuid) -> MmResult<(), SwapStateMachineError> { - let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -230,7 +234,8 @@ pub(super) async fn mark_swap_as_finished(ctx: MmArc, id: Uuid) -> MmResult<(), pub(super) fn init_additional_context_impl(ctx: &MmArc, swap_info: ActiveSwapV2Info, other_p2p_pubkey: PublicKey) { subscribe_to_topic(ctx, swap_v2_topic(&swap_info.uuid)); - let swap_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(ctx, None).expect("SwapsContext::from_ctx should not fail"); swap_ctx.init_msg_v2_store(swap_info.uuid, other_p2p_pubkey); swap_ctx .active_swaps_v2_infos @@ -241,7 +246,8 @@ pub(super) fn init_additional_context_impl(ctx: &MmArc, swap_info: ActiveSwapV2I pub(super) fn clean_up_context_impl(ctx: &MmArc, uuid: &Uuid, maker_coin: &str, taker_coin: &str) { unsubscribe_from_topic(ctx, swap_v2_topic(uuid)); - let swap_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(ctx, None).expect("SwapsContext::from_ctx should not fail"); swap_ctx.remove_msg_v2_store(uuid); swap_ctx.active_swaps_v2_infos.lock().unwrap().remove(uuid); diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index 04bb0df564..a84ba06980 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -79,7 +79,8 @@ impl From for SwapV2DbError { pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid) -> MmResult, SwapV2DbError> { use crate::mm2::lp_swap::swap_wasm_db::MySwapsFiltersTable; - let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO db_id + let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -186,7 +187,8 @@ pub(super) async fn get_maker_swap_data_for_rpc( ctx: &MmArc, uuid: &Uuid, ) -> MmResult>, SwapV2DbError> { - let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -226,7 +228,8 @@ pub(super) async fn get_taker_swap_data_for_rpc( ctx: &MmArc, uuid: &Uuid, ) -> MmResult>, SwapV2DbError> { - let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; diff 
--git a/mm2src/mm2_main/src/lp_swap/swap_watcher.rs b/mm2src/mm2_main/src/lp_swap/swap_watcher.rs index a8df8f2455..c43517b925 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_watcher.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_watcher.rs @@ -587,7 +587,8 @@ fn spawn_taker_swap_watcher(ctx: MmArc, watcher_data: TakerSwapWatcherData, veri return; } - let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + // TODO: db_id + let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); if swap_ctx.swap_msgs.lock().unwrap().contains_key(&watcher_data.uuid) { return; } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index ca7ab51302..584ab5bb11 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -450,7 +450,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let to_broadcast = !(swap.maker_coin.is_privacy() || swap.taker_coin.is_privacy()); let running_swap = Arc::new(swap); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.taker_coin.account_db_id().as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.maker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index ec5c88c0c5..66a12ea7cf 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -210,7 +210,8 @@ impl StateMachineStorage for TakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("SwapsContext::from_ctx should not fail"); + // TODO: db_id + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, None).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; @@ -797,7 +798,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -816,7 +818,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let ticker = self.taker_coin.ticker(); if let Some(taker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { taker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); @@ -844,7 +847,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, From fa0f5ab710a8a7c351c52fa112a5caa8a212b939 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 16:30:43 +0100 Subject: [PATCH 008/186] changes to maker and taker swap db_id --- 
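Note on the pattern this patch converges on, with a minimal stand-alone sketch (the function below is illustrative only, not the crate's API): the account id is derived from the coin that owns the record via MmCoin::account_db_id() at the point of persistence, and swap files are then resolved under a per-account subtree of the DB root, falling back to the shared layout when the id is None.

use std::path::{Path, PathBuf};

// Illustrative sketch: resolve a my-swaps JSON path from an optional account id
// (e.g. a hash of the activated coin's pubkey). `None` keeps the legacy shared
// layout, `Some(id)` nests the same tree under a per-account directory.
fn my_swap_file_path_sketch(db_root: &Path, db_id: Option<&str>, uuid: &str) -> PathBuf {
    let base = match db_id {
        Some(id) => db_root.join(id),
        None => db_root.to_path_buf(),
    };
    base.join("SWAPS").join("MY").join(format!("{}.json", uuid))
}

fn main() {
    // Example usage with a placeholder account id and the swap uuid from the fixture above.
    let path = my_swap_file_path_sketch(
        Path::new("DB"),
        Some("my_account_rmd160"),
        "3447b727-fe93-4357-8e5a-8cf2699b7e86",
    );
    println!("{}", path.display());
}
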
mm2src/mm2_main/src/lp_swap/maker_swap.rs | 15 +++++---------- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 18 ++++++------------ 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 3b0fa54d38..a1760e1b3b 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -86,13 +86,9 @@ pub fn stats_maker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) stats_maker_swap_dir(ctx, db_id).join(format!("{}.json", uuid)) } -async fn save_my_maker_swap_event( - ctx: &MmArc, - db_id: Option<&str>, - swap: &MakerSwap, - event: MakerSavedEvent, -) -> Result<(), String> { - let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id, swap.uuid).await { +async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSavedEvent) -> Result<(), String> { + let db_id = swap.maker_coin.account_db_id(); + let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Maker(MakerSavedSwap { uuid: swap.uuid, @@ -118,7 +114,7 @@ async fn save_my_maker_swap_event( maker_swap.fetch_and_set_usd_prices().await; } let new_swap = SavedSwap::Maker(maker_swap); - try_s!(new_swap.save_to_db(ctx, db_id).await); + try_s!(new_swap.save_to_db(ctx, db_id.as_deref()).await); Ok(()) } else { ERR!("Expected SavedSwap::Maker, got {:?}", swap) @@ -2117,8 +2113,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { .dispatch_async(ctx.clone(), LpEvents::MakerSwapStatusChanged(event_to_send)) .await; drop(dispatcher); - let account_key = running_swap.maker_coin.account_db_id().clone(); - save_my_maker_swap_event(&ctx, account_key.as_deref(), &running_swap, to_save) + save_my_maker_swap_event(&ctx, &running_swap, to_save) .await .expect("!save_my_maker_swap_event"); if event.should_ban_taker() { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 584ab5bb11..6aa1163e52 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -107,13 +107,9 @@ pub fn stats_taker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) stats_taker_swap_dir(ctx, db_id).join(format!("{}.json", uuid)) } -async fn save_my_taker_swap_event( - ctx: &MmArc, - db_id: Option<&str>, - swap: &TakerSwap, - event: TakerSavedEvent, -) -> Result<(), String> { - let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id, swap.uuid).await { +async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSavedEvent) -> Result<(), String> { + let db_id = swap.taker_coin.account_db_id(); + let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Taker(TakerSavedSwap { uuid: swap.uuid, @@ -149,7 +145,7 @@ async fn save_my_taker_swap_event( taker_swap.fetch_and_set_usd_prices().await; } let new_swap = SavedSwap::Taker(taker_swap); - try_s!(new_swap.save_to_db(ctx, db_id).await); + try_s!(new_swap.save_to_db(ctx, db_id.as_deref()).await); Ok(()) } else { ERR!("Expected SavedSwap::Taker, got {:?}", swap) @@ -330,7 +326,7 @@ impl TakerSavedSwap { } } - // TODO: Adjust for private coins when/if they are braodcasted + // TODO: Adjust for private coins when/if they are broadcasted // TODO: Adjust for HD wallet when completed pub fn swap_pubkeys(&self) -> Result { let taker = match &self.events.first() { @@ -466,8 +462,7 
@@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { event: event.clone(), }; - let account_key = Some(running_swap.my_persistent_pub.to_string()); - save_my_taker_swap_event(&ctx, account_key.as_deref(), &running_swap, to_save) + save_my_taker_swap_event(&ctx, &running_swap, to_save) .await .expect("!save_my_taker_swap_event"); if event.should_ban_maker() { @@ -1958,7 +1953,6 @@ impl TakerSwap { swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { let account_key = taker_coin.account_db_id(); - let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), From 947ab8d7c029308e89c05250f4182e9c7922da9d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 16:51:06 +0100 Subject: [PATCH 009/186] complete zcoin dbs multi pubkey support for WASM target --- mm2src/coins/lp_coins.rs | 8 +------- mm2src/coins/z_coin.rs | 10 ++++++++-- mm2src/coins/z_coin/z_rpc.rs | 11 +++++++---- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index c58660f7ef..0bad365190 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -2967,13 +2967,7 @@ pub trait MmCoin: fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; #[cfg(not(target_arch = "wasm32"))] - fn account_db_id(&self) -> Option { - if let Ok(key) = self.get_public_key() { - return Some(key); - }; - - None - } + fn account_db_id(&self) -> Option { self.get_public_key().ok() } #[cfg(target_arch = "wasm32")] fn account_db_id(&self) -> Option { None } diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index 43273a07a2..4d589cfd33 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -889,8 +889,12 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { &my_z_addr, ); - // TODO: db_id - let blocks_db = self.init_blocks_db(None).await?; + #[cfg(target_arch = "wasm32")] + let db_id = utxo_common::my_public_key(&utxo_arc).ok().map(|k| k.to_string()); + #[cfg(not(target_arch = "wasm32"))] + let db_id: Option = None; + + let blocks_db = self.init_blocks_db(db_id.as_deref()).await?; let (z_balance_event_sender, z_balance_event_handler) = if self.ctx.event_stream_configuration.is_some() { let (sender, receiver) = futures::channel::mpsc::unbounded(); @@ -908,6 +912,7 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { blocks_db, &z_spending_key, z_balance_event_sender, + db_id.as_deref(), ) .await? }, @@ -925,6 +930,7 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { skip_sync_params.unwrap_or_default(), &z_spending_key, z_balance_event_sender, + db_id.as_deref(), ) .await? 
}, diff --git a/mm2src/coins/z_coin/z_rpc.rs b/mm2src/coins/z_coin/z_rpc.rs index 8c8ee4347a..271712dd90 100644 --- a/mm2src/coins/z_coin/z_rpc.rs +++ b/mm2src/coins/z_coin/z_rpc.rs @@ -501,6 +501,7 @@ impl ZRpcOps for NativeClient { } } +#[allow(clippy::too_many_arguments)] pub(super) async fn init_light_client<'a>( builder: &ZCoinBuilder<'a>, lightwalletd_urls: Vec, @@ -509,6 +510,7 @@ pub(super) async fn init_light_client<'a>( skip_sync_params: bool, z_spending_key: &ExtendedSpendingKey, z_balance_event_sender: Option, + db_id: Option<&str>, ) -> Result<(AsyncMutex, WalletDbShared), MmError> { let coin = builder.ticker.to_string(); let (sync_status_notifier, sync_watcher) = channel(1); @@ -541,13 +543,12 @@ pub(super) async fn init_light_client<'a>( // check if no sync_params was provided and continue syncing from last height in db if it's > 0 or skip_sync_params is true. let continue_from_prev_sync = (min_height > 0 && sync_params.is_none()) || (skip_sync_params && min_height < sapling_activation_height); - // TODO: db_id let wallet_db = WalletDbShared::new( builder, maybe_checkpoint_block, z_spending_key, continue_from_prev_sync, - None, + db_id, ) .await?; // Check min_height in blocks_db and rewind blocks_db to 0 if sync_height != min_height @@ -589,12 +590,14 @@ pub(super) async fn init_light_client<'a>( } #[cfg(not(target_arch = "wasm32"))] +#[allow(clippy::too_many_arguments)] pub(super) async fn init_native_client<'a>( builder: &ZCoinBuilder<'a>, native_client: NativeClient, blocks_db: BlockDbImpl, z_spending_key: &ExtendedSpendingKey, z_balance_event_sender: Option, + db_id: Option<&str>, ) -> Result<(AsyncMutex, WalletDbShared), MmError> { let coin = builder.ticker.to_string(); let (sync_status_notifier, sync_watcher) = channel(1); @@ -608,8 +611,8 @@ pub(super) async fn init_native_client<'a>( is_pre_sapling: false, actual: checkpoint_height, }; - // TODO: db_id - let wallet_db = WalletDbShared::new(builder, checkpoint_block, z_spending_key, true, None) + + let wallet_db = WalletDbShared::new(builder, checkpoint_block, z_spending_key, true, db_id) .await .mm_err(|err| ZcoinClientInitError::ZcoinStorageError(err.to_string()))?; From ae95d4e69df6b359761d233f076a454c827b0c73 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 17:13:57 +0100 Subject: [PATCH 010/186] fix clippy/fmt --- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 6aa1163e52..6c67ea44cb 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -3215,7 +3215,7 @@ mod taker_swap_tests { taker_saved_swap, )) .unwrap(); - let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let arc = Arc::new(swap); let weak_ref = Arc::downgrade(&arc); swaps_ctx.running_swaps.lock().unwrap().push(weak_ref); From c0d13e72b4480d94425375299aa135ede77ae34d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 18:26:26 +0100 Subject: [PATCH 011/186] impl account_db_id for wasm target --- mm2src/coins/lp_coins.rs | 12 +++++++++-- mm2src/coins/z_coin.rs | 20 ++++++++++-------- .../z_coin/storage/z_params/indexeddb.rs | 4 ++-- mm2src/coins/z_coin/storage/z_params/mod.rs | 4 ++-- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 14 ++++++++++--- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 14 +++++++++---- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 14 
++++++++++--- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 21 +++++++++++++------ 8 files changed, 72 insertions(+), 31 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 0bad365190..f315787ee8 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -2967,10 +2967,18 @@ pub trait MmCoin: fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; #[cfg(not(target_arch = "wasm32"))] - fn account_db_id(&self) -> Option { self.get_public_key().ok() } + fn account_db_id(&self) -> Result, String> { Ok(None) } #[cfg(target_arch = "wasm32")] - fn account_db_id(&self) -> Option { None } + fn account_db_id(&self) -> Result, String> { + Ok(Some( + try_s!(Public::from_slice( + try_s!(hex::decode(try_s!(self.get_public_key()))).as_slice() + )) + .address_hash() + .to_string(), + )) + } /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index 4d589cfd33..367c3412d0 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -863,6 +863,13 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { let utxo = self.build_utxo_fields().await?; let utxo_arc = UtxoArc::new(utxo); + #[cfg(target_arch = "wasm32")] + let db_id = utxo_common::my_public_key(&utxo_arc) + .ok() + .map(|k| k.address_hash().to_string()); + #[cfg(not(target_arch = "wasm32"))] + let db_id: Option = None; + let z_spending_key = match self.z_spending_key { Some(ref z_spending_key) => z_spending_key.clone(), None => extended_spending_key_from_protocol_info_and_policy( @@ -883,17 +890,12 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { .expect("DEX_FEE_Z_ADDR is a valid z-address") .expect("DEX_FEE_Z_ADDR is a valid z-address"); - let z_tx_prover = self.z_tx_prover().await?; + let z_tx_prover = self.z_tx_prover(db_id.as_deref()).await?; let my_z_addr_encoded = encode_payment_address( self.protocol_info.consensus_params.hrp_sapling_payment_address(), &my_z_addr, ); - #[cfg(target_arch = "wasm32")] - let db_id = utxo_common::my_public_key(&utxo_arc).ok().map(|k| k.to_string()); - #[cfg(not(target_arch = "wasm32"))] - let db_id: Option = None; - let blocks_db = self.init_blocks_db(db_id.as_deref()).await?; let (z_balance_event_sender, z_balance_event_handler) = if self.ctx.event_stream_configuration.is_some() { @@ -1016,7 +1018,7 @@ impl<'a> ZCoinBuilder<'a> { } #[cfg(not(target_arch = "wasm32"))] - async fn z_tx_prover(&self) -> Result> { + async fn z_tx_prover(&self, _db_id: Option<&str>) -> Result> { let params_dir = match &self.z_coin_params.zcash_params_path { None => default_params_folder().or_mm_err(|| ZCoinBuildError::ZCashParamsNotFound)?, Some(file_path) => PathBuf::from(file_path), @@ -1035,8 +1037,8 @@ impl<'a> ZCoinBuilder<'a> { } #[cfg(target_arch = "wasm32")] - async fn z_tx_prover(&self) -> Result> { - let params_db = ZcashParamsWasmImpl::new(self.ctx) + async fn z_tx_prover(&self, db_id: Option<&str>) -> Result> { + let params_db = ZcashParamsWasmImpl::new(self.ctx, db_id) .await .mm_err(|err| ZCoinBuildError::ZCashParamsError(err.to_string()))?; let (sapling_spend, sapling_output) = if !params_db diff --git a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs index d513cfce22..ac6eeada39 100644 --- a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs +++ b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs @@ -69,8 +69,8 @@ impl ZcashParamsWasmInner { pub(crate) struct ZcashParamsWasmImpl(SharedDb); impl ZcashParamsWasmImpl { - pub(crate) async fn 
new(ctx: &MmArc) -> MmResult { - Ok(Self(ConstructibleDb::new(ctx, None).into_shared())) + pub(crate) async fn new(ctx: &MmArc, db_id: Option<&str>) -> MmResult { + Ok(Self(ConstructibleDb::new(ctx, db_id).into_shared())) } async fn lock_db(&self) -> ZcashParamsWasmRes> { diff --git a/mm2src/coins/z_coin/storage/z_params/mod.rs b/mm2src/coins/z_coin/storage/z_params/mod.rs index d86a7181a0..1ca891a370 100644 --- a/mm2src/coins/z_coin/storage/z_params/mod.rs +++ b/mm2src/coins/z_coin/storage/z_params/mod.rs @@ -65,7 +65,7 @@ async fn test_download_save_and_get_params() { register_wasm_log(); info!("Testing download, save and get params"); let ctx = mm_ctx_with_custom_db(); - let db = ZcashParamsWasmImpl::new(&ctx).await.unwrap(); + let db = ZcashParamsWasmImpl::new(&ctx, None).await.unwrap(); // save params let (sapling_spend, sapling_output) = db.download_and_save_params().await.unwrap(); // get params @@ -79,7 +79,7 @@ async fn test_download_save_and_get_params() { async fn test_check_for_no_params() { register_wasm_log(); let ctx = mm_ctx_with_custom_db(); - let db = ZcashParamsWasmImpl::new(&ctx).await.unwrap(); + let db = ZcashParamsWasmImpl::new(&ctx, None).await.unwrap(); // check for no params let check_params = db.check_params().await.unwrap(); assert!(!check_params) diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index a1760e1b3b..72b97dfed8 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -87,7 +87,7 @@ pub fn stats_maker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) } async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSavedEvent) -> Result<(), String> { - let db_id = swap.maker_coin.account_db_id(); + let db_id = try_s!(swap.maker_coin.account_db_id()); let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Maker(MakerSavedSwap { @@ -1293,7 +1293,7 @@ impl MakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let account_key = maker_coin.account_db_id(); + let account_key = try_s!(maker_coin.account_db_id()); let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), @@ -2090,7 +2090,15 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { } let running_swap = Arc::new(swap); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.maker_coin.account_db_id().as_deref()).unwrap(); + let swap_ctx = SwapsContext::from_ctx( + &ctx, + running_swap + .maker_coin + .account_db_id() + .expect("Valid maker pubkey") + .as_deref(), + ) + .unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.taker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index a784abb937..e3385d3b6c 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -683,8 +683,11 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx( + &self.ctx, + self.maker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), + ) + 
.expect("from_ctx should not fail at this point"); let maker_coin_ticker: String = self.maker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -703,8 +706,11 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx( + &self.ctx, + self.maker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), + ) + .expect("from_ctx should not fail at this point"); let ticker = self.maker_coin.ticker(); if let Some(maker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { maker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 6c67ea44cb..6fee2893fa 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -108,7 +108,7 @@ pub fn stats_taker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) } async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSavedEvent) -> Result<(), String> { - let db_id = swap.taker_coin.account_db_id(); + let db_id = try_s!(swap.taker_coin.account_db_id()); let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Taker(TakerSavedSwap { @@ -446,7 +446,15 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let to_broadcast = !(swap.maker_coin.is_privacy() || swap.taker_coin.is_privacy()); let running_swap = Arc::new(swap); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.taker_coin.account_db_id().as_deref()).unwrap(); + let swap_ctx = SwapsContext::from_ctx( + &ctx, + running_swap + .taker_coin + .account_db_id() + .expect("Valid maker pubkey") + .as_deref(), + ) + .unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.maker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); @@ -1952,7 +1960,7 @@ impl TakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let account_key = taker_coin.account_db_id(); + let account_key = try_s!(taker_coin.account_db_id()); let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 66a12ea7cf..f224b5f909 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -798,8 +798,11 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx( + &self.ctx, + self.taker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), + ) + .expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -818,8 +821,11 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx( + &self.ctx, + self.taker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), + ) 
+ .expect("from_ctx should not fail at this point"); let ticker = self.taker_coin.ticker(); if let Some(taker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { taker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); @@ -847,8 +853,11 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx( + &self.ctx, + self.taker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), + ) + .expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, From 71c4e9c9323d6bc261255d38fe8095d9f267d292 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 19:54:58 +0100 Subject: [PATCH 012/186] complete some todos in swap mod --- mm2src/mm2_main/src/lp_swap.rs | 33 ++++++++++--------- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 17 ++++------ mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 3 +- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 15 ++++++--- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 33 +++++++++++-------- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 19 +++++------ mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 2 +- 7 files changed, 66 insertions(+), 56 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 75dba0ab7f..c712412fc3 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -336,7 +336,8 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest return match json::from_slice::(msg) { Ok(mut status) => { status.data.fetch_and_set_usd_prices().await; - if let Err(e) = save_stats_swap(&ctx, &status.data).await { + // TODO: db_id + if let Err(e) = save_stats_swap(&ctx, &status.data, None).await { error!("Error saving the swap {} status: {}", status.data.uuid(), e); } Ok(()) @@ -1034,8 +1035,8 @@ fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap) { } #[cfg(not(target_arch = "wasm32"))] -async fn save_stats_swap(ctx: &MmArc, swap: &SavedSwap) -> Result<(), String> { - try_s!(swap.save_to_stats_db(ctx, None).await); +async fn save_stats_swap(ctx: &MmArc, swap: &SavedSwap, db_id: Option<&str>) -> Result<(), String> { + try_s!(swap.save_to_stats_db(ctx, db_id).await); add_swap_to_db_index(ctx, swap); Ok(()) } @@ -1117,11 +1118,12 @@ impl From for MySwapStatusResponse { /// Returns the status of swap performed on `my` node pub async fn my_swap_status(ctx: MmArc, req: Json) -> Result>, String> { let uuid: Uuid = try_s!(json::from_value(req["params"]["uuid"].clone())); - let swap_type = try_s!(get_swap_type(&ctx, &uuid).await); + let db_id: Option = try_s!(json::from_value(req["params"]["db_id"].clone())); + let swap_type = try_s!(get_swap_type(&ctx, &uuid, db_id.as_deref()).await); match swap_type { Some(LEGACY_SWAP_TYPE) => { - let status = match SavedSwap::load_my_swap_from_db(&ctx, None, uuid).await { + let status = match SavedSwap::load_my_swap_from_db(&ctx, db_id.as_deref(), uuid).await { Ok(Some(status)) => status, Ok(None) => return Err("swap data is not found".to_owned()), Err(e) => return ERR!("{}", e), @@ -1132,13 +1134,13 @@ pub async fn my_swap_status(ctx: MmArc, req: Json) -> Result>, Ok(try_s!(Response::builder().body(res))) }, Some(MAKER_SWAP_V2_TYPE) => { - let swap_data = try_s!(get_maker_swap_data_for_rpc(&ctx, &uuid).await); + let swap_data = 
try_s!(get_maker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); let res_js = json!({ "result": swap_data }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) }, Some(TAKER_SWAP_V2_TYPE) => { - let swap_data = try_s!(get_taker_swap_data_for_rpc(&ctx, &uuid).await); + let swap_data = try_s!(get_taker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); let res_js = json!({ "result": swap_data }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) @@ -1157,9 +1159,10 @@ pub async fn stats_swap_status(_ctx: MmArc, _req: Json) -> Result Result>, String> { let uuid: Uuid = try_s!(json::from_value(req["params"]["uuid"].clone())); + let db_id: Option = try_s!(json::from_value(req["params"]["db_id"].clone())); - let maker_status = try_s!(SavedSwap::load_from_maker_stats_db(&ctx, None, uuid).await); - let taker_status = try_s!(SavedSwap::load_from_taker_stats_db(&ctx, None, uuid).await); + let maker_status = try_s!(SavedSwap::load_from_maker_stats_db(&ctx, db_id.as_deref(), uuid).await); + let taker_status = try_s!(SavedSwap::load_from_taker_stats_db(&ctx, db_id.as_deref(), uuid).await); if maker_status.is_none() && taker_status.is_none() { return ERR!("swap data is not found"); @@ -1182,15 +1185,15 @@ struct SwapStatus { } /// Broadcasts `my` swap status to P2P network -async fn broadcast_my_swap_status(ctx: &MmArc, uuid: Uuid) -> Result<(), String> { - let mut status = match try_s!(SavedSwap::load_my_swap_from_db(ctx, None, uuid).await) { +async fn broadcast_my_swap_status(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> Result<(), String> { + let mut status = match try_s!(SavedSwap::load_my_swap_from_db(ctx, db_id, uuid).await) { Some(status) => status, None => return ERR!("swap data is not found"), }; status.hide_secrets(); #[cfg(not(target_arch = "wasm32"))] - try_s!(save_stats_swap(ctx, &status).await); + try_s!(save_stats_swap(ctx, &status, db_id).await); let status = SwapStatus { method: "swapstatus".into(), @@ -1240,6 +1243,7 @@ pub struct MyRecentSwapsReq { pub paging_options: PagingOptions, #[serde(flatten)] pub filter: MySwapsFilter, + pub db_id: Option, } #[derive(Debug, Default, PartialEq)] @@ -1327,14 +1331,14 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result warn!("No such swap with the uuid '{}'", uuid), Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, - MAKER_SWAP_V2_TYPE => match get_maker_swap_data_for_rpc(&ctx, uuid).await { + MAKER_SWAP_V2_TYPE => match get_maker_swap_data_for_rpc(&ctx, uuid, req.db_id.as_deref()).await { Ok(data) => { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); }, Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, - TAKER_SWAP_V2_TYPE => match get_taker_swap_data_for_rpc(&ctx, uuid).await { + TAKER_SWAP_V2_TYPE => match get_taker_swap_data_for_rpc(&ctx, uuid, req.db_id.as_deref()).await { Ok(data) => { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); @@ -1755,7 +1759,6 @@ pub fn process_swap_v2_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PProcessRes let uuid = Uuid::from_str(topic).map_to_mm(|e| P2PProcessError::DecodeError(e.to_string()))?; - // TODO: db_id let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let mut msgs = swap_ctx.swap_v2_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(&uuid) { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 72b97dfed8..3c70e46951 100644 --- 
a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -2089,16 +2089,9 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { }; } let running_swap = Arc::new(swap); + let account_id = running_swap.maker_coin.account_db_id().expect("Valid maker pubkey"); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx( - &ctx, - running_swap - .maker_coin - .account_db_id() - .expect("Valid maker pubkey") - .as_deref(), - ) - .unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, account_id.as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.taker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); @@ -2146,12 +2139,14 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { command = c; }, None => { - if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid).await { + if let Err(e) = + mark_swap_as_finished(ctx.clone(), running_swap.uuid, account_id.as_deref()).await + { error!("!mark_swap_finished({}): {}", uuid, e); } if to_broadcast { - if let Err(e) = broadcast_my_swap_status(&ctx, uuid).await { + if let Err(e) = broadcast_my_swap_status(&ctx, uuid, account_id.as_deref()).await { error!("!broadcast_my_swap_status({}): {}", uuid, e); } } diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index e3385d3b6c..e63495e60c 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -238,7 +238,8 @@ impl StateMachineStorage for MakerSwapStorage { } async fn mark_finished(&mut self, id: Self::MachineId) -> Result<(), Self::Error> { - mark_swap_as_finished(self.ctx.clone(), id).await + // TODO: db_id + mark_swap_as_finished(self.ctx.clone(), id, None).await } } diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index c5a0f15e82..174e059b82 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -212,14 +212,21 @@ pub(super) async fn get_unfinished_swaps_uuids( } #[cfg(not(target_arch = "wasm32"))] -pub(super) async fn mark_swap_as_finished(ctx: MmArc, id: Uuid) -> MmResult<(), SwapStateMachineError> { +pub(super) async fn mark_swap_as_finished( + ctx: MmArc, + id: Uuid, + _db_id: Option<&str>, +) -> MmResult<(), SwapStateMachineError> { async_blocking(move || Ok(set_swap_is_finished(&ctx.sqlite_connection(), &id.to_string())?)).await } #[cfg(target_arch = "wasm32")] -pub(super) async fn mark_swap_as_finished(ctx: MmArc, id: Uuid) -> MmResult<(), SwapStateMachineError> { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); +pub(super) async fn mark_swap_as_finished( + ctx: MmArc, + id: Uuid, + db_id: Option<&str>, +) -> MmResult<(), SwapStateMachineError> { + let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index a84ba06980..6d49e56ecb 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -34,7 +34,7 @@ cfg_wasm32!( ); #[cfg(not(target_arch = "wasm32"))] -pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid) -> MmResult, SqlError> { +pub(super) 
async fn get_swap_type(ctx: &MmArc, uuid: &Uuid, _db_id: Option<&str>) -> MmResult, SqlError> { let ctx = ctx.clone(); let uuid = uuid.to_string(); @@ -76,11 +76,14 @@ impl From for SwapV2DbError { } #[cfg(target_arch = "wasm32")] -pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid) -> MmResult, SwapV2DbError> { +pub(super) async fn get_swap_type( + ctx: &MmArc, + uuid: &Uuid, + db_id: Option<&str>, +) -> MmResult, SwapV2DbError> { use crate::mm2::lp_swap::swap_wasm_db::MySwapsFiltersTable; - // TODO db_id - let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -150,22 +153,25 @@ impl MySwapForRpc { pub(super) async fn get_maker_swap_data_for_rpc( ctx: &MmArc, uuid: &Uuid, + db_id: Option<&str>, ) -> MmResult>, SqlError> { - get_swap_data_for_rpc_impl(ctx, uuid).await + get_swap_data_for_rpc_impl(ctx, uuid, db_id).await } #[cfg(not(target_arch = "wasm32"))] pub(super) async fn get_taker_swap_data_for_rpc( ctx: &MmArc, uuid: &Uuid, + db_id: Option<&str>, ) -> MmResult>, SqlError> { - get_swap_data_for_rpc_impl(ctx, uuid).await + get_swap_data_for_rpc_impl(ctx, uuid, db_id).await } #[cfg(not(target_arch = "wasm32"))] async fn get_swap_data_for_rpc_impl( ctx: &MmArc, uuid: &Uuid, + _db_id: Option<&str>, ) -> MmResult>, SqlError> { let ctx = ctx.clone(); let uuid = uuid.to_string(); @@ -186,9 +192,9 @@ async fn get_swap_data_for_rpc_impl( pub(super) async fn get_maker_swap_data_for_rpc( ctx: &MmArc, uuid: &Uuid, + db_id: Option<&str>, ) -> MmResult>, SwapV2DbError> { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -227,9 +233,9 @@ pub(super) async fn get_maker_swap_data_for_rpc( pub(super) async fn get_taker_swap_data_for_rpc( ctx: &MmArc, uuid: &Uuid, + db_id: Option<&str>, ) -> MmResult>, SwapV2DbError> { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -308,11 +314,11 @@ async fn get_swap_data_by_uuid_and_type( })) }, MAKER_SWAP_V2_TYPE => { - let data = get_maker_swap_data_for_rpc(ctx, &uuid).await?; + let data = get_maker_swap_data_for_rpc(ctx, &uuid, db_id).await?; Ok(data.map(SwapRpcData::MakerV2)) }, TAKER_SWAP_V2_TYPE => { - let data = get_taker_swap_data_for_rpc(ctx, &uuid).await?; + let data = get_taker_swap_data_for_rpc(ctx, &uuid, db_id).await?; Ok(data.map(SwapRpcData::TakerV2)) }, unsupported => MmError::err(GetSwapDataErr::UnsupportedSwapType(unsupported)), @@ -322,6 +328,7 @@ async fn get_swap_data_by_uuid_and_type( #[derive(Deserialize)] pub(crate) struct MySwapStatusRequest { uuid: Uuid, + db_id: Option, } #[derive(Display, Serialize, SerializeErrorType)] @@ -366,7 +373,7 @@ pub(crate) async fn my_swap_status_rpc( ctx: MmArc, req: MySwapStatusRequest, ) -> MmResult { - let swap_type = get_swap_type(&ctx, &req.uuid) + let swap_type = get_swap_type(&ctx, &req.uuid, req.db_id.as_deref()) .await? 
.or_mm_err(|| MySwapStatusError::NoSwapWithUuid(req.uuid))?; get_swap_data_by_uuid_and_type(&ctx, None, req.uuid, swap_type) diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 6fee2893fa..4276901d42 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -445,16 +445,9 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let uuid = swap.uuid.to_string(); let to_broadcast = !(swap.maker_coin.is_privacy() || swap.taker_coin.is_privacy()); let running_swap = Arc::new(swap); + let account_id = running_swap.taker_coin.account_db_id().expect("Valid maker pubkey"); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx( - &ctx, - running_swap - .taker_coin - .account_db_id() - .expect("Valid maker pubkey") - .as_deref(), - ) - .unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, account_id.as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.maker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); @@ -495,12 +488,16 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { command = c; }, None => { - if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid).await { + if let Err(e) = + mark_swap_as_finished(ctx.clone(), running_swap.uuid, account_id.as_deref()).await + { error!("!mark_swap_finished({}): {}", uuid, e); } if to_broadcast { - if let Err(e) = broadcast_my_swap_status(&ctx, running_swap.uuid).await { + if let Err(e) = + broadcast_my_swap_status(&ctx, running_swap.uuid, account_id.as_deref()).await + { error!("!broadcast_my_swap_status({}): {}", uuid, e); } } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index f224b5f909..568e8d3b3c 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -269,7 +269,7 @@ impl StateMachineStorage for TakerSwapStorage { } async fn mark_finished(&mut self, id: Self::MachineId) -> Result<(), Self::Error> { - mark_swap_as_finished(self.ctx.clone(), id).await + mark_swap_as_finished(self.ctx.clone(), id, None).await } } From be3dc818fd280f7e6ad4d3fdaaf9135ea7acc887 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 10 Apr 2024 21:03:18 +0100 Subject: [PATCH 013/186] minor changes to DbIdentifier constructor(new) --- mm2src/mm2_db/src/indexed_db/indexed_db.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/mm2src/mm2_db/src/indexed_db/indexed_db.rs b/mm2src/mm2_db/src/indexed_db/indexed_db.rs index e292f1f475..1aa2577009 100644 --- a/mm2src/mm2_db/src/indexed_db/indexed_db.rs +++ b/mm2src/mm2_db/src/indexed_db/indexed_db.rs @@ -94,7 +94,6 @@ impl DbIdentifier { pub fn db_name(&self) -> &'static str { self.db_name } pub fn new(namespace_id: DbNamespaceId, pubkey: Option) -> DbIdentifier { - let pubkey = Some(pubkey.unwrap_or_else(|| hex::encode(H160::default().as_slice()))); DbIdentifier { namespace_id, pubkey, From ffbc262ed29596fbb0e112e3acaa018f39dc5125 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 17:35:22 +0100 Subject: [PATCH 014/186] changes to swaps db --- mm2src/mm2_main/src/database/my_swaps.rs | 2 + mm2src/mm2_main/src/lp_ordermatch.rs | 2 + mm2src/mm2_main/src/lp_swap.rs | 38 +++++++++++++++---- .../mm2_main/src/lp_swap/my_swaps_storage.rs | 18 ++++++--- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 18 ++++++++- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 3 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 
1 + 7 files changed, 66 insertions(+), 16 deletions(-) diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index fc0a1f94eb..d93831db65 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -70,6 +70,7 @@ pub fn insert_new_swap( uuid: &str, started_at: &str, swap_type: u8, + _db_id: Option<&str>, ) -> SqlResult<()> { debug!("Inserting new swap {} to the SQLite database", uuid); let conn = ctx.sqlite_connection(); @@ -199,6 +200,7 @@ pub fn select_uuids_by_my_swaps_filter( conn: &Connection, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, + _db_id: Option<&str>, ) -> SqlResult { let mut query_builder = SqlBuilder::select_from(MY_SWAPS_TABLE); let mut params = vec![]; diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index edf55206f0..345d8b543a 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -3019,6 +3019,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO uuid, now, LEGACY_SWAP_TYPE, + maker_coin.account_db_id().expect("Valid coin pubkey").as_deref(), ) .await { @@ -3181,6 +3182,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat uuid, now, LEGACY_SWAP_TYPE, + taker_coin.account_db_id().expect("Valid coin pubkey").as_deref(), ) .await { diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index c712412fc3..82d74ad482 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -336,8 +336,8 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest return match json::from_slice::(msg) { Ok(mut status) => { status.data.fetch_and_set_usd_prices().await; - // TODO: db_id - if let Err(e) = save_stats_swap(&ctx, &status.data, None).await { + let account_id = status.data.account_db_id(&ctx).await.expect("Valid coin pubkey"); + if let Err(e) = save_stats_swap(&ctx, &status.data, account_id.as_deref()).await { error!("Error saving the swap {} status: {}", status.data.uuid(), e); } Ok(()) @@ -1020,9 +1020,10 @@ pub async fn insert_new_swap_to_db( uuid: Uuid, started_at: u64, swap_type: u8, + db_id: Option<&str>, ) -> Result<(), String> { MySwapsStorage::new(ctx) - .save_new_swap(my_coin, other_coin, uuid, started_at, swap_type) + .save_new_swap(my_coin, other_coin, uuid, started_at, swap_type, db_id) .await .map_err(|e| ERRL!("{}", e)) } @@ -1217,9 +1218,10 @@ pub struct MySwapsFilter { /// Returns *all* uuids of swaps, which match the selected filter. 
pub async fn all_swaps_uuids_by_filter(ctx: MmArc, req: Json) -> Result>, String> { let filter: MySwapsFilter = try_s!(json::from_value(req)); + // TODO: db_id let db_result = try_s!( MySwapsStorage::new(ctx) - .my_recent_swaps_with_filters(&filter, None) + .my_recent_swaps_with_filters(&filter, None, None) .await ); @@ -1264,14 +1266,30 @@ pub enum LatestSwapsErr { UnableToLoadSavedSwaps(SavedSwapError), #[display(fmt = "Unable to query swaps storage")] UnableToQuerySwapStorage, + #[display(fmt = "My coin not found or not activated")] + CoinNotFound, } +// pub async fn get_account_db_id(ctx: &MmArc, coin: &str) -> Result, String> { +// let db_id = try_s!(lp_coinfind_any(&ctx, &coin).await); +// let db_id = if let Some(id) = db_id { +// try_s!(id.inner.account_db_id()) +// } else { +// None +// }; + +// Ok(db_id) +// } + pub async fn latest_swaps_for_pair( ctx: MmArc, my_coin: String, other_coin: String, limit: usize, ) -> Result, MmError> { + // TODO: db_id + let db_id: Option = None; + let filter = MySwapsFilter { my_coin: Some(my_coin), other_coin: Some(other_coin), @@ -1286,7 +1304,7 @@ pub async fn latest_swaps_for_pair( }; let db_result = match MySwapsStorage::new(ctx.clone()) - .my_recent_swaps_with_filters(&filter, Some(&paging_options)) + .my_recent_swaps_with_filters(&filter, Some(&paging_options), db_id.as_deref()) .await { Ok(x) => x, @@ -1296,7 +1314,7 @@ pub async fn latest_swaps_for_pair( let mut swaps = Vec::with_capacity(db_result.uuids_and_types.len()); // TODO this is needed for trading bot, which seems not used as of now. Remove the code? for (uuid, _) in db_result.uuids_and_types.iter() { - let swap = match SavedSwap::load_my_swap_from_db(&ctx, None, *uuid).await { + let swap = match SavedSwap::load_my_swap_from_db(&ctx, db_id.as_deref(), *uuid).await { Ok(Some(swap)) => swap, Ok(None) => { error!("No such swap with the uuid '{}'", uuid); @@ -1313,9 +1331,11 @@ pub async fn latest_swaps_for_pair( /// Returns the data of recent swaps of `my` node. 
pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: MyRecentSwapsReq = try_s!(json::from_value(req)); + + // TODO: db_id let db_result = try_s!( MySwapsStorage::new(ctx.clone()) - .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options)) + .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options), None) .await ); @@ -1554,7 +1574,8 @@ pub async fn import_swaps(ctx: MmArc, req: Json) -> Result>, St let mut imported = vec![]; let mut skipped = HashMap::new(); for swap in swaps { - match swap.save_to_db(&ctx, None).await { + let accound_id = swap.account_db_id(&ctx).await?; + match swap.save_to_db(&ctx, accound_id.as_deref()).await { Ok(_) => { if let Some(info) = swap.get_my_info() { if let Err(e) = insert_new_swap_to_db( @@ -1564,6 +1585,7 @@ pub async fn import_swaps(ctx: MmArc, req: Json) -> Result>, St *swap.uuid(), info.started_at, LEGACY_SWAP_TYPE, + accound_id.as_deref(), ) .await { diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 70efedb108..97a7d884d4 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -38,12 +38,14 @@ pub trait MySwapsOps { uuid: Uuid, started_at: u64, swap_type: u8, + db_id: Option<&str>, ) -> MySwapsResult<()>; async fn my_recent_swaps_with_filters( &self, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, + db_id: Option<&str>, ) -> MySwapsResult; } @@ -83,6 +85,7 @@ mod native_impl { uuid: Uuid, started_at: u64, swap_type: u8, + db_id: Option<&str>, ) -> MySwapsResult<()> { Ok(insert_new_swap( &self.ctx, @@ -91,6 +94,7 @@ mod native_impl { &uuid.to_string(), &started_at.to_string(), swap_type, + db_id, )?) } @@ -98,11 +102,13 @@ mod native_impl { &self, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, + db_id: Option<&str>, ) -> MySwapsResult { Ok(select_uuids_by_my_swaps_filter( &self.ctx.sqlite_connection(), filter, paging_options, + db_id, )?) 
} } @@ -175,9 +181,9 @@ mod wasm_impl { uuid: Uuid, started_at: u64, swap_type: u8, + db_id: Option<&str>, ) -> MySwapsResult<()> { - // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(&self.ctx, None).map_to_mm(MySwapsError::InternalError)?; + let swap_ctx = SwapsContext::from_ctx(&self.ctx, db_id).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db().await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; @@ -198,9 +204,9 @@ mod wasm_impl { &self, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, + db_id: Option<&str>, ) -> MySwapsResult { - // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(&self.ctx, None).map_to_mm(MySwapsError::InternalError)?; + let swap_ctx = SwapsContext::from_ctx(&self.ctx, db_id).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db().await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; @@ -387,13 +393,13 @@ mod wasm_tests { }); } my_swaps - .save_new_swap(my_coin, other_coin, uuid, started_at, swap_type) + .save_new_swap(my_coin, other_coin, uuid, started_at, swap_type, None) .await .expect("!MySwapsStorage::save_new_swap"); } let actual = my_swaps - .my_recent_swaps_with_filters(&filters, None) + .my_recent_swaps_with_filters(&filters, None, None) .await .expect("!MySwapsStorage::my_recent_swaps_with_filters"); diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 467b551d07..ac80b10071 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -2,7 +2,7 @@ use crate::mm2::lp_swap::maker_swap::{MakerSavedSwap, MakerSwap, MakerSwapEvent} use crate::mm2::lp_swap::taker_swap::{TakerSavedSwap, TakerSwap, TakerSwapEvent}; use crate::mm2::lp_swap::{MySwapInfo, RecoveredSwap}; use async_trait::async_trait; -use coins::lp_coinfind; +use coins::{lp_coinfind, lp_coinfind_any}; use derive_more::Display; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; @@ -153,6 +153,22 @@ impl SavedSwap { SavedSwap::Taker(taker) => taker.fetch_and_set_usd_prices().await, } } + + pub async fn account_db_id(&self, ctx: &MmArc) -> Result, String> { + let coin_ticker = match self { + SavedSwap::Maker(swap) => &swap.maker_coin, + SavedSwap::Taker(swap) => &swap.taker_coin, + }; + + if let Some(ticker) = coin_ticker { + let coin = lp_coinfind_any(ctx, ticker).await?.map(|c| c.inner); + if let Some(coin) = coin { + return coin.account_db_id(); + } + } + + Ok(None) + } } #[async_trait] diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index 6d49e56ecb..3635c54c76 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -434,8 +434,9 @@ pub(crate) async fn my_recent_swaps_rpc( ctx: MmArc, req: MyRecentSwapsRequest, ) -> MmResult { + // TODO: db_id let db_result = MySwapsStorage::new(ctx.clone()) - .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options)) + .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options), None) .await?; let mut swaps = Vec::with_capacity(db_result.uuids_and_types.len()); for (uuid, swap_type) in db_result.uuids_and_types.iter() { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 4276901d42..782942b97b 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -446,6 +446,7 @@ pub async fn 
run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let to_broadcast = !(swap.maker_coin.is_privacy() || swap.taker_coin.is_privacy()); let running_swap = Arc::new(swap); let account_id = running_swap.taker_coin.account_db_id().expect("Valid maker pubkey"); + info!("USING COIN PUBKEY: {account_id:?}"); let weak_ref = Arc::downgrade(&running_swap); let swap_ctx = SwapsContext::from_ctx(&ctx, account_id.as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.maker); From 07c1168a12a1bddf48a0cac2d2486bfd55ba89db Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 19:38:24 +0100 Subject: [PATCH 015/186] handle recv_swap_msg with db_id --- mm2src/mm2_main/src/lp_swap.rs | 4 ++-- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 6 ++++++ mm2src/mm2_main/src/lp_swap/taker_swap.rs | 6 ++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 82d74ad482..b85fea70cf 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -398,14 +398,14 @@ async fn recv_swap_msg( mut getter: impl FnMut(&mut SwapMsgStore) -> Option, uuid: &Uuid, timeout: u64, + db_id: Option<&str>, ) -> Result { let started = now_sec(); let timeout = BASIC_COMM_TIMEOUT + timeout; let wait_until = started + timeout; loop { Timer::sleep(1.).await; - // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, db_id).unwrap(); let mut msgs = swap_ctx.swap_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(uuid) { if let Some(msg) = getter(msg_store) { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 3c70e46951..2cfb4dfefb 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -594,11 +594,13 @@ impl MakerSwap { NEGOTIATION_TIMEOUT_SEC as f64 / 6., self.p2p_privkey, ); + let db_id = try_s!(self.maker_coin.account_db_id()); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiation_reply.take(), &self.uuid, NEGOTIATION_TIMEOUT_SEC, + db_id.as_deref(), ); let taker_data = match recv_fut.await { Ok(d) => d, @@ -698,11 +700,13 @@ impl MakerSwap { self.p2p_privkey, ); + let db_id = try_s!(self.maker_coin.account_db_id()); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.taker_fee.take(), &self.uuid, TAKER_FEE_RECV_TIMEOUT_SEC, + db_id.as_deref(), ); let payload = match recv_fut.await { Ok(d) => d, @@ -933,11 +937,13 @@ impl MakerSwap { // wait for 3/5, we need to leave some time space for transaction to be confirmed let wait_duration = (self.r().data.lock_duration * 3) / 5; + let db_id = try_s!(self.maker_coin.account_db_id()); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.taker_payment.take(), &self.uuid, wait_duration, + db_id.as_deref(), ); // Todo: taker_payment should be a message on lightning network not a swap message let payload = match recv_fut.await { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 782942b97b..0bc0364261 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -1123,11 +1123,13 @@ impl TakerSwap { async fn negotiate(&self) -> Result<(Option, Vec), String> { const NEGOTIATE_TIMEOUT_SEC: u64 = 90; + let db_id = try_s!(self.maker_coin.account_db_id()); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiation.take(), &self.uuid, 
NEGOTIATE_TIMEOUT_SEC, + db_id.as_deref(), ); let maker_data = match recv_fut.await { Ok(d) => d, @@ -1234,11 +1236,13 @@ impl TakerSwap { NEGOTIATE_TIMEOUT_SEC as f64 / 6., self.p2p_privkey, ); + let db_id = try_s!(self.taker_coin.account_db_id()); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiated.take(), &self.uuid, NEGOTIATE_TIMEOUT_SEC, + db_id.as_deref(), ); let negotiated = match recv_fut.await { Ok(d) => d, @@ -1329,11 +1333,13 @@ impl TakerSwap { self.p2p_privkey, ); + let db_id = try_s!(self.maker_coin.account_db_id()); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.maker_payment.take(), &self.uuid, MAKER_PAYMENT_WAIT_TIMEOUT_SEC, + db_id.as_deref(), ); let payload = match recv_fut.await { Ok(p) => p, From 1ec96bcf73ad619f7302ecd8d2f20302016303fd Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 19:58:13 +0100 Subject: [PATCH 016/186] add todos for StateMachineStorage for TakerSwapStorage --- mm2src/mm2_main/src/database/my_swaps.rs | 14 ++++-- mm2src/mm2_main/src/lp_swap.rs | 6 ++- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 15 ++++-- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 50 +++++++++++++------ mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 12 +++-- 5 files changed, 69 insertions(+), 28 deletions(-) diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index d93831db65..8554e0caa9 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -259,14 +259,14 @@ pub fn select_uuids_by_my_swaps_filter( } /// Returns whether a swap with specified uuid exists in DB -pub fn does_swap_exist(conn: &Connection, uuid: &str) -> SqlResult { +pub fn does_swap_exist(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> SqlResult { const SELECT_SWAP_ID_BY_UUID: &str = "SELECT id FROM my_swaps WHERE uuid = :uuid;"; let res: Option = query_single_row(conn, SELECT_SWAP_ID_BY_UUID, &[(":uuid", uuid)], |row| row.get(0))?; Ok(res.is_some()) } /// Queries swap events by uuid -pub fn get_swap_events(conn: &Connection, uuid: &str) -> SqlResult { +pub fn get_swap_events(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> SqlResult { const SELECT_SWAP_EVENTS_BY_UUID: &str = "SELECT events_json FROM my_swaps WHERE uuid = :uuid;"; let mut stmt = conn.prepare(SELECT_SWAP_EVENTS_BY_UUID)?; let swap_type = stmt.query_row(&[(":uuid", uuid)], |row| row.get(0))?; @@ -274,7 +274,7 @@ pub fn get_swap_events(conn: &Connection, uuid: &str) -> SqlResult { } /// Updates swap events by uuid -pub fn update_swap_events(conn: &Connection, uuid: &str, events_json: &str) -> SqlResult<()> { +pub fn update_swap_events(conn: &Connection, uuid: &str, events_json: &str, _db_id: Option<&str>) -> SqlResult<()> { const UPDATE_SWAP_EVENTS_BY_UUID: &str = "UPDATE my_swaps SET events_json = :events_json WHERE uuid = :uuid;"; let mut stmt = conn.prepare(UPDATE_SWAP_EVENTS_BY_UUID)?; stmt.execute(&[(":uuid", uuid), (":events_json", events_json)]) @@ -282,12 +282,16 @@ pub fn update_swap_events(conn: &Connection, uuid: &str, events_json: &str) -> S } const UPDATE_SWAP_IS_FINISHED_BY_UUID: &str = "UPDATE my_swaps SET is_finished = 1 WHERE uuid = :uuid;"; -pub fn set_swap_is_finished(conn: &Connection, uuid: &str) -> SqlResult<()> { +pub fn set_swap_is_finished(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> SqlResult<()> { let mut stmt = conn.prepare(UPDATE_SWAP_IS_FINISHED_BY_UUID)?; stmt.execute(&[(":uuid", uuid)]).map(|_| ()) } -pub fn select_unfinished_swaps_uuids(conn: 
&Connection, swap_type: u8) -> SqlResult, SelectSwapsUuidsErr> { +pub fn select_unfinished_swaps_uuids( + conn: &Connection, + swap_type: u8, + _db_id: Option<&str>, +) -> SqlResult, SelectSwapsUuidsErr> { const SELECT_UNFINISHED_SWAPS_UUIDS_BY_TYPE: &str = "SELECT uuid FROM my_swaps WHERE is_finished = 0 AND swap_type = :type;"; let mut stmt = conn.prepare(SELECT_UNFINISHED_SWAPS_UUIDS_BY_TYPE)?; diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index b85fea70cf..18a18ce63a 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1390,9 +1390,11 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result Result, String> { #[cfg(target_arch = "wasm32")] try_s!(migrate_swaps_data(&ctx, None).await); - + // TODO: db_id + let db_id: Option = None; let mut coins = HashSet::new(); - let legacy_unfinished_uuids = try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE).await); + let legacy_unfinished_uuids = + try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE, db_id.as_deref()).await); for uuid in legacy_unfinished_uuids { // Todo db_id let swap = match SavedSwap::load_my_swap_from_db(&ctx, None, uuid).await { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index e63495e60c..9bc5a92699 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -226,20 +226,27 @@ impl StateMachineStorage for MakerSwapStorage { } async fn has_record_for(&mut self, id: &Self::MachineId) -> Result { - has_db_record_for(self.ctx.clone(), id).await + // TODO: db_id + let db_id: Option = None; + has_db_record_for(self.ctx.clone(), id, db_id.as_deref()).await } async fn store_event(&mut self, id: Self::MachineId, event: MakerSwapEvent) -> Result<(), Self::Error> { - store_swap_event::(self.ctx.clone(), id, event).await + // TODO: db_id + let db_id: Option = None; + store_swap_event::(self.ctx.clone(), id, event, db_id.as_deref()).await } async fn get_unfinished(&self) -> Result, Self::Error> { - get_unfinished_swaps_uuids(self.ctx.clone(), MAKER_SWAP_V2_TYPE).await + // TODO: db_id + let db_id: Option = None; + get_unfinished_swaps_uuids(self.ctx.clone(), MAKER_SWAP_V2_TYPE, db_id.as_deref()).await } async fn mark_finished(&mut self, id: Self::MachineId) -> Result<(), Self::Error> { // TODO: db_id - mark_swap_as_finished(self.ctx.clone(), id, None).await + let db_id: Option = None; + mark_swap_as_finished(self.ctx.clone(), id, db_id.as_deref()).await } } diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 174e059b82..2152b243f3 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -98,15 +98,24 @@ pub struct SwapRecreateCtx { } #[cfg(not(target_arch = "wasm32"))] -pub(super) async fn has_db_record_for(ctx: MmArc, id: &Uuid) -> MmResult { +pub(super) async fn has_db_record_for( + ctx: MmArc, + id: &Uuid, + db_id: Option<&str>, +) -> MmResult { let id_str = id.to_string(); - Ok(async_blocking(move || does_swap_exist(&ctx.sqlite_connection(), &id_str)).await?) + let db_id = db_id.map(|e| e.to_string()); + + Ok(async_blocking(move || does_swap_exist(&ctx.sqlite_connection(), &id_str, db_id.as_deref())).await?) 
} #[cfg(target_arch = "wasm32")] -pub(super) async fn has_db_record_for(ctx: MmArc, id: &Uuid) -> MmResult { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); +pub(super) async fn has_db_record_for( + ctx: MmArc, + id: &Uuid, + db_id: Option<&str>, +) -> MmResult { + let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -119,18 +128,21 @@ pub(super) async fn store_swap_event( ctx: MmArc, id: Uuid, event: T::Event, + db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> where T::Event: DeserializeOwned + Serialize + Send + 'static, { let id_str = id.to_string(); + let db_id = db_id.map(|e| e.to_string()); + async_blocking(move || { - let events_json = get_swap_events(&ctx.sqlite_connection(), &id_str)?; + let events_json = get_swap_events(&ctx.sqlite_connection(), &id_str, db_id.as_deref())?; let mut events: Vec = serde_json::from_str(&events_json)?; events.push(event); drop_mutability!(events); let serialized_events = serde_json::to_string(&events)?; - update_swap_events(&ctx.sqlite_connection(), &id_str, &serialized_events)?; + update_swap_events(&ctx.sqlite_connection(), &id_str, &serialized_events, db_id.as_deref())?; Ok(()) }) .await @@ -141,9 +153,9 @@ pub(super) async fn store_swap_event, ) -> MmResult<(), SwapStateMachineError> { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -185,9 +197,11 @@ pub(super) async fn get_swap_repr(ctx: &MmArc, id: Uuid) -> pub(super) async fn get_unfinished_swaps_uuids( ctx: MmArc, swap_type: u8, + db_id: Option<&str>, ) -> MmResult, SwapStateMachineError> { + let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - select_unfinished_swaps_uuids(&ctx.sqlite_connection(), swap_type) + select_unfinished_swaps_uuids(&ctx.sqlite_connection(), swap_type, db_id.as_deref()) .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) }) .await @@ -197,12 +211,12 @@ pub(super) async fn get_unfinished_swaps_uuids( pub(super) async fn get_unfinished_swaps_uuids( ctx: MmArc, swap_type: u8, + db_id: Option<&str>, ) -> MmResult, SwapStateMachineError> { let index = MultiIndex::new(IS_FINISHED_SWAP_TYPE_INDEX) .with_value(BoolAsInt::new(false))? .with_value(swap_type)?; - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&ctx, None).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -215,9 +229,17 @@ pub(super) async fn get_unfinished_swaps_uuids( pub(super) async fn mark_swap_as_finished( ctx: MmArc, id: Uuid, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> { - async_blocking(move || Ok(set_swap_is_finished(&ctx.sqlite_connection(), &id.to_string())?)).await + let db_id = db_id.map(|e| e.to_string()); + async_blocking(move || { + Ok(set_swap_is_finished( + &ctx.sqlite_connection(), + &id.to_string(), + db_id.as_deref(), + )?) 
+ }) + .await } #[cfg(target_arch = "wasm32")] diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 568e8d3b3c..c92a72b35b 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -257,15 +257,21 @@ impl StateMachineStorage for TakerSwapStorage { } async fn has_record_for(&mut self, id: &Self::MachineId) -> Result { - has_db_record_for(self.ctx.clone(), id).await + // TODO: db_id + let db_id: Option = None; + has_db_record_for(self.ctx.clone(), id, db_id.as_deref()).await } async fn store_event(&mut self, id: Self::MachineId, event: TakerSwapEvent) -> Result<(), Self::Error> { - store_swap_event::(self.ctx.clone(), id, event).await + // TODO: db_id + let db_id: Option = None; + store_swap_event::(self.ctx.clone(), id, event, db_id.as_deref()).await } async fn get_unfinished(&self) -> Result, Self::Error> { - get_unfinished_swaps_uuids(self.ctx.clone(), TAKER_SWAP_V2_TYPE).await + // TODO: db_id + let db_id: Option = None; + get_unfinished_swaps_uuids(self.ctx.clone(), TAKER_SWAP_V2_TYPE, db_id.as_deref()).await } async fn mark_finished(&mut self, id: Self::MachineId) -> Result<(), Self::Error> { From 3ccfa003be17a7953e8a28c185577a5fe572cba1 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 20:00:08 +0100 Subject: [PATCH 017/186] add todos for StateMachineStorage for TakerSwapStorage --- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 2152b243f3..096a7d017c 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -177,9 +177,12 @@ pub(super) async fn store_swap_event(ctx: &MmArc, id: Uuid) -> MmResult { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(ctx, None).expect("SwapsContext::from_ctx should not fail"); +pub(super) async fn get_swap_repr( + ctx: &MmArc, + id: Uuid, + db_id: Option<&str>, +) -> MmResult { + let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; From 29763e2c052fcc46bc2a0a82d23dff1d760b6f2d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 20:03:17 +0100 Subject: [PATCH 018/186] get_swap_repr --- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 4 +++- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index 9bc5a92699..fad6c56cd9 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -222,7 +222,9 @@ impl StateMachineStorage for MakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn get_repr(&self, id: Self::MachineId) -> Result { - get_swap_repr(&self.ctx, id).await + // TODO: db_id + let db_id: Option = None; + get_swap_repr(&self.ctx, id, db_id.as_deref()).await } async fn has_record_for(&mut self, id: &Self::MachineId) -> Result { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index c92a72b35b..079064cb2b 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -253,7 +253,9 @@ impl StateMachineStorage for TakerSwapStorage { 
#[cfg(target_arch = "wasm32")] async fn get_repr(&self, id: Self::MachineId) -> Result { - get_swap_repr(&self.ctx, id).await + // TODO: db_id + let db_id: Option = None; + get_swap_repr(&self.ctx, id, db_id.as_deref()).await } async fn has_record_for(&mut self, id: &Self::MachineId) -> Result { From cf749ebfb96dec92e5e0e5ccbb627b36d253160b Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 20:05:29 +0100 Subject: [PATCH 019/186] mark account_db_id as todo for native target --- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index ac80b10071..8340f8c231 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -154,6 +154,13 @@ impl SavedSwap { } } + #[cfg(not(target_arch = "wasm32"))] + pub async fn account_db_id(&self, ctx: &MmArc) -> Result, String> { + // TODO + Ok(None) + } + + #[cfg(target_arch = "wasm32")] pub async fn account_db_id(&self, ctx: &MmArc) -> Result, String> { let coin_ticker = match self { SavedSwap::Maker(swap) => &swap.maker_coin, From 368441ce0517eace48d692817a0928547017cef9 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 21:35:22 +0100 Subject: [PATCH 020/186] impl find_unique_active_account_ids and use for swap_kick_starts --- mm2src/coins/lp_coins.rs | 31 ++++ mm2src/mm2_main/src/lp_swap.rs | 174 +++++++++++----------- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 11 +- 3 files changed, 128 insertions(+), 88 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index f315787ee8..3abffd56f1 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4136,6 +4136,37 @@ pub async fn lp_coinfind_any(ctx: &MmArc, ticker: &str) -> Result Result, String> { + let cctx = try_s!(CoinsContext::from_ctx(ctx)); + let coins = cctx.coins.lock().await; + let coins = coins.values().collect::>(); + + // Using a HashSet to ensure uniqueness efficiently + let mut account_ids = HashSet::new(); + // Add default wallet pubkey + account_ids.insert(hex::encode(ctx.rmd160().as_slice())); + + for coin in coins { + if let Some(account) = try_s!(coin.inner.account_db_id()) { + if coin.is_available() { + account_ids.insert(account); + } + }; + } + + Ok(account_ids) +} + +#[cfg(not(target_arch = "wasm32"))] +// TODO: complete impl when implementing multikey support for sqlite/native +pub async fn find_unique_active_account_ids(ctx: &MmArc) -> Result, String> { + let cctx = try_s!(CoinsContext::from_ctx(ctx)); + let mut account_ids = HashSet::new(); + account_ids.insert(hex::encode(ctx.rmd160().as_slice())); + Ok(account_ids) +} + /// Attempts to find a pair of active coins returning None if one is not enabled pub async fn find_pair(ctx: &MmArc, base: &str, rel: &str) -> Result, String> { let fut_base = lp_coinfind(ctx, base); diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 18a18ce63a..edac9e1d1f 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -62,7 +62,8 @@ use crate::mm2::lp_network::{broadcast_p2p_msg, Libp2pPeerId, P2PProcessError, P use crate::mm2::lp_swap::maker_swap_v2::{MakerSwapStateMachine, MakerSwapStorage}; use crate::mm2::lp_swap::taker_swap_v2::{TakerSwapStateMachine, TakerSwapStorage}; use bitcrypto::{dhash160, sha256}; -use coins::{lp_coinfind, lp_coinfind_or_err, CoinFindError, DexFee, MmCoin, MmCoinEnum, TradeFee, TransactionEnum}; +use 
coins::{find_unique_active_account_ids, lp_coinfind, lp_coinfind_or_err, CoinFindError, DexFee, MmCoin, + MmCoinEnum, TradeFee, TransactionEnum}; use common::log::{debug, warn}; use common::now_sec; use common::time_cache::DuplicateCache; @@ -1388,97 +1389,102 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result Result, String> { - #[cfg(target_arch = "wasm32")] - try_s!(migrate_swaps_data(&ctx, None).await); - // TODO: db_id - let db_id: Option = None; + let db_ids = try_s!(find_unique_active_account_ids(&ctx).await); let mut coins = HashSet::new(); - let legacy_unfinished_uuids = - try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE, db_id.as_deref()).await); - for uuid in legacy_unfinished_uuids { - // Todo db_id - let swap = match SavedSwap::load_my_swap_from_db(&ctx, None, uuid).await { - Ok(Some(s)) => s, - Ok(None) => { - warn!("Swap {} is indexed, but doesn't exist in DB", uuid); - continue; - }, - Err(e) => { - error!("Error {} on getting swap {} data from DB", e, uuid); - continue; - }, - }; - info!("Kick starting the swap {}", swap.uuid()); - let maker_coin_ticker = match swap.maker_coin_ticker() { - Ok(t) => t, - Err(e) => { - error!("Error {} getting maker coin of swap: {}", e, swap.uuid()); - continue; - }, - }; - let taker_coin_ticker = match swap.taker_coin_ticker() { - Ok(t) => t, - Err(e) => { - error!("Error {} getting taker coin of swap {}", e, swap.uuid()); - continue; - }, - }; - coins.insert(maker_coin_ticker.clone()); - coins.insert(taker_coin_ticker.clone()); - let fut = kickstart_thread_handler(ctx.clone(), swap, maker_coin_ticker, taker_coin_ticker); - ctx.spawner().spawn(fut); - } + for db_id in db_ids { + let db_id = Some(db_id); - let maker_swap_storage = MakerSwapStorage::new(ctx.clone()); - let unfinished_maker_uuids = try_s!(maker_swap_storage.get_unfinished().await); - for maker_uuid in unfinished_maker_uuids { - info!("Trying to kickstart maker swap {}", maker_uuid); - let maker_swap_repr = match maker_swap_storage.get_repr(maker_uuid).await { - Ok(repr) => repr, - Err(e) => { - error!("Error {} getting DB repr of maker swap {}", e, maker_uuid); - continue; - }, - }; - debug!("Got maker swap repr {:?}", maker_swap_repr); + #[cfg(target_arch = "wasm32")] + try_s!(migrate_swaps_data(&ctx, db_id.as_deref()).await); - coins.insert(maker_swap_repr.maker_coin.clone()); - coins.insert(maker_swap_repr.taker_coin.clone()); - - let fut = swap_kickstart_handler::>( - ctx.clone(), - maker_swap_repr, - maker_swap_storage.clone(), - maker_uuid, - ); - ctx.spawner().spawn(fut); - } + let legacy_unfinished_uuids = + try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE, db_id.as_deref()).await); + for uuid in legacy_unfinished_uuids { + let swap = match SavedSwap::load_my_swap_from_db(&ctx, db_id.as_deref(), uuid).await { + Ok(Some(s)) => s, + Ok(None) => { + warn!("Swap {} is indexed, but doesn't exist in DB", uuid); + continue; + }, + Err(e) => { + error!("Error {} on getting swap {} data from DB", e, uuid); + continue; + }, + }; + info!("Kick starting the swap {}", swap.uuid()); + let maker_coin_ticker = match swap.maker_coin_ticker() { + Ok(t) => t, + Err(e) => { + error!("Error {} getting maker coin of swap: {}", e, swap.uuid()); + continue; + }, + }; + let taker_coin_ticker = match swap.taker_coin_ticker() { + Ok(t) => t, + Err(e) => { + error!("Error {} getting taker coin of swap {}", e, swap.uuid()); + continue; + }, + }; + coins.insert(maker_coin_ticker.clone()); + coins.insert(taker_coin_ticker.clone()); - let 
taker_swap_storage = TakerSwapStorage::new(ctx.clone()); - let unfinished_taker_uuids = try_s!(taker_swap_storage.get_unfinished().await); - for taker_uuid in unfinished_taker_uuids { - info!("Trying to kickstart taker swap {}", taker_uuid); - let taker_swap_repr = match taker_swap_storage.get_repr(taker_uuid).await { - Ok(repr) => repr, - Err(e) => { - error!("Error {} getting DB repr of taker swap {}", e, taker_uuid); - continue; - }, - }; - debug!("Got taker swap repr {:?}", taker_swap_repr); + let fut = kickstart_thread_handler(ctx.clone(), swap, maker_coin_ticker, taker_coin_ticker); + ctx.spawner().spawn(fut); + } - coins.insert(taker_swap_repr.maker_coin.clone()); - coins.insert(taker_swap_repr.taker_coin.clone()); + let maker_swap_storage = MakerSwapStorage::new(ctx.clone()); + let unfinished_maker_uuids = try_s!(maker_swap_storage.get_unfinished().await); + for maker_uuid in unfinished_maker_uuids { + info!("Trying to kickstart maker swap {}", maker_uuid); + let maker_swap_repr = match maker_swap_storage.get_repr(maker_uuid).await { + Ok(repr) => repr, + Err(e) => { + error!("Error {} getting DB repr of maker swap {}", e, maker_uuid); + continue; + }, + }; + debug!("Got maker swap repr {:?}", maker_swap_repr); + + coins.insert(maker_swap_repr.maker_coin.clone()); + coins.insert(maker_swap_repr.taker_coin.clone()); + + let fut = swap_kickstart_handler::>( + ctx.clone(), + maker_swap_repr, + maker_swap_storage.clone(), + maker_uuid, + ); + ctx.spawner().spawn(fut); + } - let fut = swap_kickstart_handler::>( - ctx.clone(), - taker_swap_repr, - taker_swap_storage.clone(), - taker_uuid, - ); - ctx.spawner().spawn(fut); + let taker_swap_storage = TakerSwapStorage::new(ctx.clone()); + let unfinished_taker_uuids = try_s!(taker_swap_storage.get_unfinished().await); + for taker_uuid in unfinished_taker_uuids { + info!("Trying to kickstart taker swap {}", taker_uuid); + let taker_swap_repr = match taker_swap_storage.get_repr(taker_uuid).await { + Ok(repr) => repr, + Err(e) => { + error!("Error {} getting DB repr of taker swap {}", e, taker_uuid); + continue; + }, + }; + debug!("Got taker swap repr {:?}", taker_swap_repr); + + coins.insert(taker_swap_repr.maker_coin.clone()); + coins.insert(taker_swap_repr.taker_coin.clone()); + + let fut = swap_kickstart_handler::>( + ctx.clone(), + taker_swap_repr, + taker_swap_storage.clone(), + taker_uuid, + ); + ctx.spawner().spawn(fut); + } } + Ok(coins) } diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 8340f8c231..ea2949b801 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -2,7 +2,7 @@ use crate::mm2::lp_swap::maker_swap::{MakerSavedSwap, MakerSwap, MakerSwapEvent} use crate::mm2::lp_swap::taker_swap::{TakerSavedSwap, TakerSwap, TakerSwapEvent}; use crate::mm2::lp_swap::{MySwapInfo, RecoveredSwap}; use async_trait::async_trait; -use coins::{lp_coinfind, lp_coinfind_any}; +use coins::lp_coinfind; use derive_more::Display; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; @@ -155,13 +155,14 @@ impl SavedSwap { } #[cfg(not(target_arch = "wasm32"))] - pub async fn account_db_id(&self, ctx: &MmArc) -> Result, String> { - // TODO - Ok(None) + pub async fn account_db_id(&self, _ctx: &MmArc) -> Result, String> { + // TODO Ok(None) } #[cfg(target_arch = "wasm32")] pub async fn account_db_id(&self, ctx: &MmArc) -> Result, String> { + use coins::lp_coinfind_any; + let coin_ticker = match self { SavedSwap::Maker(swap) => &swap.maker_coin, 
SavedSwap::Taker(swap) => &swap.taker_coin, @@ -337,6 +338,7 @@ mod wasm_impl { } pub async fn migrate_swaps_data(ctx: &MmArc, db_id: Option<&str>) -> MmResult<(), SavedSwapError> { + info!("migrate_swaps_data: {db_id:?}"); let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; @@ -457,6 +459,7 @@ mod wasm_impl { } async fn save_to_db(&self, ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult<()> { + info!("save_to_db: {db_id:?}"); let saved_swap = json::to_value(self).map_to_mm(|e| SavedSwapError::ErrorSerializing(e.to_string()))?; let saved_swap_item = SavedSwapTable { uuid: *self.uuid(), From 7095605e9146081b9cdb1955e4a1011f143cf770 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 21:58:57 +0100 Subject: [PATCH 021/186] make swap_v2 multi_key --- mm2src/coins/lp_coins.rs | 1 - mm2src/mm2_main/src/lp_ordermatch.rs | 10 +++--- mm2src/mm2_main/src/lp_swap.rs | 4 +-- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 32 +++++++++----------- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 3 +- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 30 +++++++++--------- 6 files changed, 38 insertions(+), 42 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 3abffd56f1..e5945e6dd9 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4161,7 +4161,6 @@ pub async fn find_unique_active_account_ids(ctx: &MmArc) -> Result Result, String> { - let cctx = try_s!(CoinsContext::from_ctx(ctx)); let mut account_ids = HashSet::new(); account_ids.insert(hex::encode(ctx.rmd160().as_slice())); Ok(account_ids) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 345d8b543a..09717e727a 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2974,12 +2974,13 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO }, }; + let account_db_id = maker_coin.account_db_id().expect("Valid coin pubkey"); if ctx.use_trading_proto_v2() { let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); match (maker_coin, taker_coin) { (MmCoinEnum::UtxoCoin(m), MmCoinEnum::UtxoCoin(t)) => { let mut maker_swap_state_machine = MakerSwapStateMachine { - storage: MakerSwapStorage::new(ctx.clone()), + storage: MakerSwapStorage::new(ctx.clone(), account_db_id.as_deref()), abortable_system: ctx .abortable_system .create_subsystem() @@ -3019,7 +3020,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO uuid, now, LEGACY_SWAP_TYPE, - maker_coin.account_db_id().expect("Valid coin pubkey").as_deref(), + account_db_id.as_deref(), ) .await { @@ -3126,6 +3127,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat ); let now = now_sec(); + let account_db_id = taker_coin.account_db_id().expect("Valid taker coin pubkey"); if ctx.use_trading_proto_v2() { let taker_secret = match generate_secret() { Ok(s) => s.into(), @@ -3138,7 +3140,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat match (maker_coin, taker_coin) { (MmCoinEnum::UtxoCoin(m), MmCoinEnum::UtxoCoin(t)) => { let mut taker_swap_state_machine = TakerSwapStateMachine { - storage: TakerSwapStorage::new(ctx.clone()), + storage: TakerSwapStorage::new(ctx.clone(), account_db_id.as_deref()), abortable_system: ctx .abortable_system .create_subsystem() @@ -3182,7 +3184,7 @@ fn lp_connected_alice(ctx: 
MmArc, taker_order: TakerOrder, taker_match: TakerMat uuid, now, LEGACY_SWAP_TYPE, - taker_coin.account_db_id().expect("Valid coin pubkey").as_deref(), + account_db_id.as_deref(), ) .await { diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index edac9e1d1f..20812c5a59 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1434,7 +1434,7 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { ctx.spawner().spawn(fut); } - let maker_swap_storage = MakerSwapStorage::new(ctx.clone()); + let maker_swap_storage = MakerSwapStorage::new(ctx.clone(), db_id.as_deref()); let unfinished_maker_uuids = try_s!(maker_swap_storage.get_unfinished().await); for maker_uuid in unfinished_maker_uuids { info!("Trying to kickstart maker swap {}", maker_uuid); @@ -1459,7 +1459,7 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { ctx.spawner().spawn(fut); } - let taker_swap_storage = TakerSwapStorage::new(ctx.clone()); + let taker_swap_storage = TakerSwapStorage::new(ctx.clone(), db_id.as_deref()); let unfinished_taker_uuids = try_s!(taker_swap_storage.get_unfinished().await); for taker_uuid in unfinished_taker_uuids { info!("Trying to kickstart taker swap {}", taker_uuid); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index fad6c56cd9..28d6cf4124 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -132,10 +132,16 @@ pub enum MakerSwapEvent { #[derive(Clone)] pub struct MakerSwapStorage { ctx: MmArc, + pub db_id: Option, } impl MakerSwapStorage { - pub fn new(ctx: MmArc) -> Self { MakerSwapStorage { ctx } } + pub fn new(ctx: MmArc, db_id: Option<&str>) -> Self { + MakerSwapStorage { + ctx, + db_id: db_id.map(|c| c.to_string()), + } + } } #[async_trait] @@ -179,8 +185,8 @@ impl StateMachineStorage for MakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, None).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = + SwapsContext::from_ctx(&self.ctx, self.db_id.as_deref()).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; @@ -222,33 +228,23 @@ impl StateMachineStorage for MakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn get_repr(&self, id: Self::MachineId) -> Result { - // TODO: db_id - let db_id: Option = None; - get_swap_repr(&self.ctx, id, db_id.as_deref()).await + get_swap_repr(&self.ctx, id, self.db_id.as_deref()).await } async fn has_record_for(&mut self, id: &Self::MachineId) -> Result { - // TODO: db_id - let db_id: Option = None; - has_db_record_for(self.ctx.clone(), id, db_id.as_deref()).await + has_db_record_for(self.ctx.clone(), id, self.db_id.as_deref()).await } async fn store_event(&mut self, id: Self::MachineId, event: MakerSwapEvent) -> Result<(), Self::Error> { - // TODO: db_id - let db_id: Option = None; - store_swap_event::(self.ctx.clone(), id, event, db_id.as_deref()).await + store_swap_event::(self.ctx.clone(), id, event, self.db_id.as_deref()).await } async fn get_unfinished(&self) -> Result, Self::Error> { - // TODO: db_id - let db_id: Option = None; - get_unfinished_swaps_uuids(self.ctx.clone(), MAKER_SWAP_V2_TYPE, db_id.as_deref()).await + get_unfinished_swaps_uuids(self.ctx.clone(), MAKER_SWAP_V2_TYPE, 
self.db_id.as_deref()).await } async fn mark_finished(&mut self, id: Self::MachineId) -> Result<(), Self::Error> { - // TODO: db_id - let db_id: Option = None; - mark_swap_as_finished(self.ctx.clone(), id, db_id.as_deref()).await + mark_swap_as_finished(self.ctx.clone(), id, self.db_id.as_deref()).await } } diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index ea2949b801..2e0dfa1dc6 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -156,7 +156,8 @@ impl SavedSwap { #[cfg(not(target_arch = "wasm32"))] pub async fn account_db_id(&self, _ctx: &MmArc) -> Result, String> { - // TODO Ok(None) + // TODO + Ok(None) } #[cfg(target_arch = "wasm32")] diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 079064cb2b..94ee88a506 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -163,10 +163,16 @@ pub enum TakerSwapEvent { #[derive(Clone)] pub struct TakerSwapStorage { ctx: MmArc, + pub db_id: Option, } impl TakerSwapStorage { - pub fn new(ctx: MmArc) -> Self { TakerSwapStorage { ctx } } + pub fn new(ctx: MmArc, db_id: Option<&str>) -> Self { + TakerSwapStorage { + ctx, + db_id: db_id.map(|c| c.to_string()), + } + } } #[async_trait] @@ -210,8 +216,8 @@ impl StateMachineStorage for TakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { - // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, None).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = + SwapsContext::from_ctx(&self.ctx, self.db_id.as_deref()).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db().await?; let transaction = db.transaction().await?; @@ -253,31 +259,23 @@ impl StateMachineStorage for TakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn get_repr(&self, id: Self::MachineId) -> Result { - // TODO: db_id - let db_id: Option = None; - get_swap_repr(&self.ctx, id, db_id.as_deref()).await + get_swap_repr(&self.ctx, id, self.db_id.as_deref()).await } async fn has_record_for(&mut self, id: &Self::MachineId) -> Result { - // TODO: db_id - let db_id: Option = None; - has_db_record_for(self.ctx.clone(), id, db_id.as_deref()).await + has_db_record_for(self.ctx.clone(), id, self.db_id.as_deref()).await } async fn store_event(&mut self, id: Self::MachineId, event: TakerSwapEvent) -> Result<(), Self::Error> { - // TODO: db_id - let db_id: Option = None; - store_swap_event::(self.ctx.clone(), id, event, db_id.as_deref()).await + store_swap_event::(self.ctx.clone(), id, event, self.db_id.as_deref()).await } async fn get_unfinished(&self) -> Result, Self::Error> { - // TODO: db_id - let db_id: Option = None; - get_unfinished_swaps_uuids(self.ctx.clone(), TAKER_SWAP_V2_TYPE, db_id.as_deref()).await + get_unfinished_swaps_uuids(self.ctx.clone(), TAKER_SWAP_V2_TYPE, self.db_id.as_deref()).await } async fn mark_finished(&mut self, id: Self::MachineId) -> Result<(), Self::Error> { - mark_swap_as_finished(self.ctx.clone(), id, None).await + mark_swap_as_finished(self.ctx.clone(), id, self.db_id.as_deref()).await } } From 8823be20af7e466b8d1bc87bad440fa73232df8c Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 11 Apr 2024 22:16:48 +0100 Subject: [PATCH 022/186] WIP - start nft multi key support --- mm2src/coins/nft.rs | 26 +++++++++++++++------ 
mm2src/coins/nft/nft_structs.rs | 6 ++--- mm2src/coins/nft/storage/db_test_helpers.rs | 2 +- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 8e73e2b272..9d16247a7a 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -80,7 +80,9 @@ pub type WithdrawNftResult = Result MmResult { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + // TODO: db_id + let db_id: Option = None; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; let storage = nft_ctx.lock_db().await?; for chain in req.chains.iter() { @@ -105,7 +107,9 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult MmResult { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + // TODO: db_id + let db_id: Option = None; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; let storage = nft_ctx.lock_db().await?; if !NftListStorageOps::is_initialized(&storage, &req.chain).await? { @@ -145,7 +149,9 @@ pub async fn get_nft_metadata(ctx: MmArc, req: NftMetadataReq) -> MmResult MmResult { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + // TODO: db_id + let db_id: Option = None; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; let storage = nft_ctx.lock_db().await?; for chain in req.chains.iter() { @@ -212,7 +218,9 @@ async fn process_transfers_confirmations( /// data fetched from the provided `url`. The function ensures the local cache is in /// sync with the latest data from the source, validates against spam contract addresses and phishing domains. pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNftError> { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + // TODO: db_id + let db_id: Option = None; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; let storage = nft_ctx.lock_db().await?; for chain in req.chains.iter() { @@ -454,7 +462,9 @@ fn prepare_uri_for_blocklist_endpoint( /// is identified as spam or matches with any phishing domains, the NFT's `possible_spam` and/or /// `possible_phishing` flags are set to true. pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResult<(), UpdateNftError> { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + // TODO: db_id + let db_id: Option = None; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; let storage = nft_ctx.lock_db().await?; let token_address_str = eth_addr_to_hex(&req.token_address); @@ -1458,8 +1468,10 @@ pub(crate) fn get_domain_from_url(url: Option<&str>) -> Option { /// Clears NFT data from the database for specified chains. 
pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearNftDbError> { + // TODO: db_id + let db_id: Option = None; if req.clear_all { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; let storage = nft_ctx.lock_db().await?; storage.clear_all_nft_data().await?; storage.clear_all_history_data().await?; @@ -1472,7 +1484,7 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN )); } - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; let storage = nft_ctx.lock_db().await?; let mut errors = Vec::new(); for chain in req.chains.iter() { diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index 600c95220e..75c5a5af46 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -725,7 +725,7 @@ impl NftCtx { /// /// If an `NftCtx` instance doesn't already exist in the MM context, it gets created and cached for subsequent use. #[cfg(not(target_arch = "wasm32"))] - pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc, _db_id: Option<&str>) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { let async_sqlite_connection = ctx .async_sqlite_connection @@ -737,10 +737,10 @@ impl NftCtx { } #[cfg(target_arch = "wasm32")] - pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { Ok(NftCtx { - nft_cache_db: ConstructibleDb::new(ctx, None).into_shared(), + nft_cache_db: ConstructibleDb::new(ctx, db_id).into_shared(), }) }))) } diff --git a/mm2src/coins/nft/storage/db_test_helpers.rs b/mm2src/coins/nft/storage/db_test_helpers.rs index d59b845661..75c7b248c2 100644 --- a/mm2src/coins/nft/storage/db_test_helpers.rs +++ b/mm2src/coins/nft/storage/db_test_helpers.rs @@ -358,5 +358,5 @@ pub(crate) async fn get_nft_ctx(_chain: &Chain) -> Arc { let ctx = mm_ctx_with_custom_async_db().await; #[cfg(target_arch = "wasm32")] let ctx = mm_ctx_with_custom_db(); - NftCtx::from_ctx(&ctx).unwrap() + NftCtx::from_ctx(&ctx, None).unwrap() } From 42a22b1dab35cc53f254184c7e7346bf1d0bbd87 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 12 Apr 2024 04:12:05 +0100 Subject: [PATCH 023/186] WIP - my_swaps_storage --- mm2src/mm2_db/src/indexed_db/db_lock.rs | 14 +- mm2src/mm2_db/src/indexed_db/indexed_db.rs | 20 +- mm2src/mm2_main/src/database/my_swaps.rs | 8 +- mm2src/mm2_main/src/lp_ordermatch.rs | 2 +- mm2src/mm2_main/src/lp_swap.rs | 187 ++++++++++-------- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 2 +- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 6 +- .../mm2_main/src/lp_swap/my_swaps_storage.rs | 29 ++- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 2 +- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 76 ++++--- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 4 +- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 6 +- 12 files changed, 201 insertions(+), 155 deletions(-) diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 33f03ecac6..481f078ae7 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -13,7 +13,7 @@ pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't 
provide a method to get the inner value by the mutable reference. mutex: AsyncMutex>, db_namespace: DbNamespaceId, - pubkey: Option, + db_id: Option, } impl ConstructibleDb { @@ -23,11 +23,11 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { let rmd = hex::encode(ctx.rmd160().as_slice()); - let pubkey = db_id.unwrap_or(&rmd); + let db_id = db_id.unwrap_or(&rmd); ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - pubkey: Some(pubkey.to_string()), + db_id: Some(db_id.to_string()), } } @@ -36,11 +36,11 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc, db_id: Option<&str>) -> Self { let rmd = hex::encode(ctx.shared_db_id().as_slice()); - let pubkey = db_id.unwrap_or(&rmd); + let db_id = db_id.unwrap_or(&rmd); ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - pubkey: Some(pubkey.to_string()), + db_id: Some(db_id.to_string()), } } @@ -50,7 +50,7 @@ impl ConstructibleDb { ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - pubkey: None, + db_id: None, } } @@ -63,7 +63,7 @@ impl ConstructibleDb { return Ok(unwrap_db_instance(locked_db)); } - let db_id = DbIdentifier::new::(self.db_namespace, self.pubkey.clone()); + let db_id = DbIdentifier::new::(self.db_namespace, self.db_id.clone()); let db = Db::init(db_id).await?; *locked_db = Some(db); diff --git a/mm2src/mm2_db/src/indexed_db/indexed_db.rs b/mm2src/mm2_db/src/indexed_db/indexed_db.rs index 1aa2577009..9dbdc7391c 100644 --- a/mm2src/mm2_db/src/indexed_db/indexed_db.rs +++ b/mm2src/mm2_db/src/indexed_db/indexed_db.rs @@ -81,36 +81,36 @@ pub trait DbInstance: Sized { } #[derive(Clone, Display)] -#[display(fmt = "{}::{}::{}", namespace_id, "self.display_pubkey()", db_name)] +#[display(fmt = "{}::{}::{}", namespace_id, "self.display_db_id()", db_name)] pub struct DbIdentifier { namespace_id: DbNamespaceId, - /// The pubkey derived from passphrase or coin. - /// This value is used to distinguish different databases corresponding to user's coin activation pubkey or seedphrase. - pubkey: Option, + /// The db_id derived from passphrase or coin. + /// This value is used to distinguish different databases corresponding to user's coin activation db_id or seedphrase. 
+ db_id: Option, db_name: &'static str, } impl DbIdentifier { pub fn db_name(&self) -> &'static str { self.db_name } - pub fn new(namespace_id: DbNamespaceId, pubkey: Option) -> DbIdentifier { + pub fn new(namespace_id: DbNamespaceId, db_id: Option) -> DbIdentifier { DbIdentifier { namespace_id, - pubkey, + db_id, db_name: Db::DB_NAME, } } - pub fn for_test(db_name: &'static str, pubkey: Option) -> DbIdentifier { - let pubkey = Some(pubkey.unwrap_or_else(|| hex::encode(H160::default().as_slice()))); + pub fn for_test(db_name: &'static str, db_id: Option) -> DbIdentifier { + let db_id = Some(db_id.unwrap_or_else(|| hex::encode(H160::default().as_slice()))); DbIdentifier { namespace_id: DbNamespaceId::for_test(), - pubkey, + db_id, db_name, } } - pub fn display_pubkey(&self) -> String { self.pubkey.clone().unwrap_or_else(|| "KOMODEFI".to_string()) } + pub fn display_db_id(&self) -> String { self.db_id.clone().unwrap_or_else(|| "KOMODEFI".to_string()) } } pub struct IndexedDbBuilder { diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 8554e0caa9..2c58fe0935 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -200,7 +200,7 @@ pub fn select_uuids_by_my_swaps_filter( conn: &Connection, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, - _db_id: Option<&str>, + db_id: &str, ) -> SqlResult { let mut query_builder = SqlBuilder::select_from(MY_SWAPS_TABLE); let mut params = vec![]; @@ -217,7 +217,10 @@ pub fn select_uuids_by_my_swaps_filter( let total_count: isize = conn.query_row_named(&count_query, params_as_trait.as_slice(), |row| row.get(0))?; let total_count = total_count.try_into().expect("COUNT should always be >= 0"); if total_count == 0 { - return Ok(MyRecentSwapsUuids::default()); + let mut default = MyRecentSwapsUuids::default(); + default.pubkey = db_id.to_string(); + drop_mutability!(default); + return Ok(default); } // query the uuids and types finally @@ -255,6 +258,7 @@ pub fn select_uuids_by_my_swaps_filter( uuids_and_types, total_count, skipped, + pubkey: db_id.to_string(), }) } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 09717e727a..c3b3930670 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2797,7 +2797,7 @@ impl OrdermatchContext { orderbook_tickers: Default::default(), original_tickers: Default::default(), #[cfg(target_arch = "wasm32")] - ordermatch_db: ConstructibleDb::new(ctx), + ordermatch_db: ConstructibleDb::new(ctx, None), }) }))) } diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 20812c5a59..1f65bc9920 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1219,23 +1219,30 @@ pub struct MySwapsFilter { /// Returns *all* uuids of swaps, which match the selected filter. 
pub async fn all_swaps_uuids_by_filter(ctx: MmArc, req: Json) -> Result>, String> { let filter: MySwapsFilter = try_s!(json::from_value(req)); - // TODO: db_id - let db_result = try_s!( - MySwapsStorage::new(ctx) - .my_recent_swaps_with_filters(&filter, None, None) - .await - ); + let db_ids = try_s!(find_unique_active_account_ids(&ctx).await); + let mut res_js = vec![]; + + for db_id in db_ids { + let db_result = try_s!( + MySwapsStorage::new(ctx.clone()) + .my_recent_swaps_with_filters(&filter, None, &db_id) + .await + ); + let res = json!({ + "result": { + "found_records": db_result.uuids_and_types.len(), + "uuids": db_result.uuids_and_types.into_iter().map(|(uuid, _)| uuid).collect::>(), + "my_coin": filter.my_coin, + "other_coin": filter.other_coin, + "from_timestamp": filter.from_timestamp, + "to_timestamp": filter.to_timestamp, + "pubkey": db_result.pubkey + }, + }); + + res_js.push(res); + } - let res_js = json!({ - "result": { - "found_records": db_result.uuids_and_types.len(), - "uuids": db_result.uuids_and_types.into_iter().map(|(uuid, _)| uuid).collect::>(), - "my_coin": filter.my_coin, - "other_coin": filter.other_coin, - "from_timestamp": filter.from_timestamp, - "to_timestamp": filter.to_timestamp, - }, - }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) } @@ -1246,11 +1253,12 @@ pub struct MyRecentSwapsReq { pub paging_options: PagingOptions, #[serde(flatten)] pub filter: MySwapsFilter, - pub db_id: Option, } #[derive(Debug, Default, PartialEq)] pub struct MyRecentSwapsUuids { + /// Pubkey i which swaps belongs to. + pub pubkey: String, /// UUIDs and types of swaps matching the query pub uuids_and_types: Vec<(Uuid, u8)>, /// Total count of swaps matching the query @@ -1267,7 +1275,7 @@ pub enum LatestSwapsErr { UnableToLoadSavedSwaps(SavedSwapError), #[display(fmt = "Unable to query swaps storage")] UnableToQuerySwapStorage, - #[display(fmt = "My coin not fouond or activated")] + #[display(fmt = "No active coin pubkey not found")] CoinNotFound, } @@ -1288,42 +1296,44 @@ pub async fn latest_swaps_for_pair( other_coin: String, limit: usize, ) -> Result, MmError> { - // TODO: db_id - let db_id: Option = None; - - let filter = MySwapsFilter { - my_coin: Some(my_coin), - other_coin: Some(other_coin), - from_timestamp: None, - to_timestamp: None, - }; + let db_ids = find_unique_active_account_ids(&ctx) + .await + .map_to_mm(|_| LatestSwapsErr::CoinNotFound)?; + let mut swaps = vec![]; - let paging_options = PagingOptions { - limit, - page_number: NonZeroUsize::new(1).expect("1 > 0"), - from_uuid: None, - }; + for db_id in db_ids { + let filter = MySwapsFilter { + my_coin: Some(my_coin.clone()), + other_coin: Some(other_coin.clone()), + from_timestamp: None, + to_timestamp: None, + }; - let db_result = match MySwapsStorage::new(ctx.clone()) - .my_recent_swaps_with_filters(&filter, Some(&paging_options), db_id.as_deref()) - .await - { - Ok(x) => x, - Err(_) => return Err(MmError::new(LatestSwapsErr::UnableToQuerySwapStorage)), - }; + let paging_options = PagingOptions { + limit, + page_number: NonZeroUsize::new(1).expect("1 > 0"), + from_uuid: None, + }; - let mut swaps = Vec::with_capacity(db_result.uuids_and_types.len()); - // TODO this is needed for trading bot, which seems not used as of now. Remove the code? 
- for (uuid, _) in db_result.uuids_and_types.iter() { - let swap = match SavedSwap::load_my_swap_from_db(&ctx, db_id.as_deref(), *uuid).await { - Ok(Some(swap)) => swap, - Ok(None) => { - error!("No such swap with the uuid '{}'", uuid); - continue; - }, - Err(e) => return Err(MmError::new(LatestSwapsErr::UnableToLoadSavedSwaps(e.into_inner()))), + let db_result = match MySwapsStorage::new(ctx.clone()) + .my_recent_swaps_with_filters(&filter, Some(&paging_options), &db_id) + .await + { + Ok(x) => x, + Err(_) => return Err(MmError::new(LatestSwapsErr::UnableToQuerySwapStorage)), }; - swaps.push(swap); + + for (uuid, _) in db_result.uuids_and_types.iter() { + let swap = match SavedSwap::load_my_swap_from_db(&ctx, Some(&db_result.pubkey), *uuid).await { + Ok(Some(swap)) => swap, + Ok(None) => { + error!("No such swap with the uuid '{}'", uuid); + continue; + }, + Err(e) => return Err(MmError::new(LatestSwapsErr::UnableToLoadSavedSwaps(e.into_inner()))), + }; + swaps.push(swap); + } } Ok(swaps) @@ -1332,46 +1342,47 @@ pub async fn latest_swaps_for_pair( /// Returns the data of recent swaps of `my` node. pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: MyRecentSwapsReq = try_s!(json::from_value(req)); + let db_ids = try_s!(find_unique_active_account_ids(&ctx).await); - // TODO: db_id - let db_result = try_s!( - MySwapsStorage::new(ctx.clone()) - .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options), None) - .await - ); - - // iterate over uuids trying to parse the corresponding files content and add to result vector - let mut swaps = Vec::with_capacity(db_result.uuids_and_types.len()); - for (uuid, swap_type) in db_result.uuids_and_types.iter() { - match *swap_type { - LEGACY_SWAP_TYPE => match SavedSwap::load_my_swap_from_db(&ctx, None, *uuid).await { - Ok(Some(swap)) => { - let swap_json = try_s!(json::to_value(MySwapStatusResponse::from(swap))); - swaps.push(swap_json) + let mut res_js = vec![]; + for db_id in db_ids { + let db_result = try_s!( + MySwapsStorage::new(ctx.clone()) + .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options), &db_id) + .await + ); + + // iterate over uuids trying to parse the corresponding files content and add to result vector + let mut swaps = vec![]; + for (uuid, swap_type) in db_result.uuids_and_types.iter() { + match *swap_type { + LEGACY_SWAP_TYPE => match SavedSwap::load_my_swap_from_db(&ctx, Some(&db_result.pubkey), *uuid).await { + Ok(Some(swap)) => { + let swap_json = try_s!(json::to_value(MySwapStatusResponse::from(swap))); + swaps.push(swap_json) + }, + Ok(None) => warn!("No such swap with the uuid '{}'", uuid), + Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, - Ok(None) => warn!("No such swap with the uuid '{}'", uuid), - Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), - }, - MAKER_SWAP_V2_TYPE => match get_maker_swap_data_for_rpc(&ctx, uuid, req.db_id.as_deref()).await { - Ok(data) => { - let swap_json = try_s!(json::to_value(data)); - swaps.push(swap_json); + MAKER_SWAP_V2_TYPE => match get_maker_swap_data_for_rpc(&ctx, uuid, Some(&db_result.pubkey)).await { + Ok(data) => { + let swap_json = try_s!(json::to_value(data)); + swaps.push(swap_json); + }, + Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, - Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), - }, - TAKER_SWAP_V2_TYPE => match get_taker_swap_data_for_rpc(&ctx, uuid, req.db_id.as_deref()).await { - Ok(data) => { - let 
swap_json = try_s!(json::to_value(data)); - swaps.push(swap_json); + TAKER_SWAP_V2_TYPE => match get_taker_swap_data_for_rpc(&ctx, uuid, Some(&db_result.pubkey)).await { + Ok(data) => { + let swap_json = try_s!(json::to_value(data)); + swaps.push(swap_json); + }, + Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, - Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), - }, - unknown_type => error!("Swap with the uuid '{}' has unknown type {}", uuid, unknown_type), + unknown_type => error!("Swap with the uuid '{}' has unknown type {}", uuid, unknown_type), + } } - } - let res_js = json!({ - "result": { + res_js.push(json!({ "swaps": swaps, "from_uuid": req.paging_options.from_uuid, "skipped": db_result.skipped, @@ -1380,8 +1391,11 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result Result, String> { for db_id in db_ids { let db_id = Some(db_id); - #[cfg(target_arch = "wasm32")] try_s!(migrate_swaps_data(&ctx, db_id.as_deref()).await); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 2cfb4dfefb..3a46f51c5a 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -1792,7 +1792,7 @@ impl MakerSwapStatusChanged { } } -#[derive(Debug, Default, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] pub struct MakerSavedSwap { pub uuid: Uuid, pub my_order_uuid: Option, diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index 28d6cf4124..102e725d89 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -46,7 +46,7 @@ cfg_wasm32!( #[allow(unused_imports)] use prost::Message; /// Negotiation data representation to be stored in DB. -#[derive(Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct StoredNegotiationData { taker_payment_locktime: u64, taker_funding_locktime: u64, @@ -58,7 +58,7 @@ pub struct StoredNegotiationData { } /// Represents events produced by maker swap states. -#[derive(Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] #[serde(tag = "event_type", content = "event_data")] pub enum MakerSwapEvent { /// Swap has been successfully initialized. 
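The convention these patches converge on is that every storage entry point threads an optional account id (`db_id`) down to `SwapsContext::from_ctx` or the SQLite helpers, with `None` falling back to the default wallet database derived from `ctx.rmd160()`. Below is a minimal, self-contained sketch of that fallback and per-account bucketing, assuming nothing beyond the standard library; `SwapStorage`, `resolve_db_id`, and `store_swap` are illustrative stand-ins, not the crate's actual API.

use std::collections::HashMap;

/// Illustrative stand-in for a per-account swap store keyed by db_id.
#[derive(Default)]
struct SwapStorage {
    /// account db_id -> swap uuids recorded under that account's database
    swaps_by_account: HashMap<String, Vec<String>>,
}

impl SwapStorage {
    /// Models the `db_id.unwrap_or(<default rmd160 hex>)` fallback used by
    /// `ConstructibleDb::new` and `SwapsContext::from_ctx` in the patches above.
    fn resolve_db_id(db_id: Option<&str>, default_db_id: &str) -> String {
        db_id.unwrap_or(default_db_id).to_string()
    }

    fn store_swap(&mut self, db_id: Option<&str>, default_db_id: &str, uuid: &str) {
        let key = Self::resolve_db_id(db_id, default_db_id);
        self.swaps_by_account.entry(key).or_default().push(uuid.to_string());
    }
}

fn main() {
    // Placeholder for hex::encode(ctx.rmd160()), the legacy single-wallet id.
    let default_db_id = "default_rmd160_hex";
    let mut storage = SwapStorage::default();

    // Legacy call sites keep passing None and stay on the default database.
    storage.store_swap(None, default_db_id, "49c79ea4-e1eb-4fb2-a0ef-265bded0b77f");
    // Multi-key call sites pass the coin's account_db_id() and get their own bucket.
    storage.store_swap(Some("hd_account_rmd160_hex"), default_db_id, "2f9afe84-7a89-4194-8947-45fba563118f");

    assert_eq!(storage.swaps_by_account.len(), 2);
}

The same shape is why `MakerSwapStorage` and `TakerSwapStorage` now carry an `Option<String>` account id and why legacy call sites can keep passing `None` without a behaviour change.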
@@ -132,7 +132,7 @@ pub enum MakerSwapEvent { #[derive(Clone)] pub struct MakerSwapStorage { ctx: MmArc, - pub db_id: Option, + db_id: Option, } impl MakerSwapStorage { diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 97a7d884d4..77f283c2df 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -45,7 +45,7 @@ pub trait MySwapsOps { &self, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, - db_id: Option<&str>, + db_id: &str, ) -> MySwapsResult; } @@ -102,7 +102,7 @@ mod native_impl { &self, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, - db_id: Option<&str>, + db_id: &str, ) -> MySwapsResult { Ok(select_uuids_by_my_swaps_filter( &self.ctx.sqlite_connection(), @@ -204,9 +204,9 @@ mod wasm_impl { &self, filter: &MySwapsFilter, paging_options: Option<&PagingOptions>, - db_id: Option<&str>, + db_id: &str, ) -> MySwapsResult { - let swap_ctx = SwapsContext::from_ctx(&self.ctx, db_id).map_to_mm(MySwapsError::InternalError)?; + let swap_ctx = SwapsContext::from_ctx(&self.ctx, Some(&db_id)).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db().await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; @@ -265,7 +265,7 @@ mod wasm_impl { .map(|(_item_id, item)| OrderedUuid::from(item)) .collect(); match paging_options { - Some(paging) => take_according_to_paging_opts(uuids, paging), + Some(paging) => take_according_to_paging_opts(uuids, paging, db_id), None => { let total_count = uuids.len(); Ok(MyRecentSwapsUuids { @@ -275,6 +275,7 @@ mod wasm_impl { .collect(), total_count, skipped: 0, + pubkey: db_id.to_string(), }) }, } @@ -284,6 +285,7 @@ mod wasm_impl { pub(super) fn take_according_to_paging_opts( uuids: BTreeSet, paging: &PagingOptions, + db_id: &str, ) -> MySwapsResult { let total_count = uuids.len(); @@ -310,6 +312,7 @@ mod wasm_impl { uuids_and_types, total_count, skipped: skip, + pubkey: db_id.to_string(), }) } @@ -339,6 +342,7 @@ mod wasm_tests { use crate::mm2::lp_swap::{LEGACY_SWAP_TYPE, MAKER_SWAP_V2_TYPE, TAKER_SWAP_V2_TYPE}; use common::log::wasm_log::register_wasm_log; use common::new_uuid; + use keys::hash::H160; use mm2_core::mm_ctx::MmCtxBuilder; use rand::seq::SliceRandom; use rand::Rng; @@ -373,6 +377,7 @@ mod wasm_tests { filters: MySwapsFilter, ) { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); + let pubkey = hex::encode(H160::default().as_slice()); let my_swaps = MySwapsStorage::new(ctx); let mut expected_uuids = BTreeSet::new(); @@ -393,13 +398,13 @@ mod wasm_tests { }); } my_swaps - .save_new_swap(my_coin, other_coin, uuid, started_at, swap_type, None) + .save_new_swap(my_coin, other_coin, uuid, started_at, swap_type, Some(&pubkey)) .await .expect("!MySwapsStorage::save_new_swap"); } let actual = my_swaps - .my_recent_swaps_with_filters(&filters, None, None) + .my_recent_swaps_with_filters(&filters, None, &pubkey) .await .expect("!MySwapsStorage::my_recent_swaps_with_filters"); @@ -411,6 +416,7 @@ mod wasm_tests { .collect(), total_count: expected_total_count, skipped: 0, + pubkey, }; assert_eq!(actual, expected); } @@ -419,6 +425,7 @@ mod wasm_tests { fn test_take_according_to_paging_opts() { register_wasm_log(); + let pubkey = hex::encode(H160::default().as_slice()); let uuids: BTreeSet = [ (1, "49c79ea4-e1eb-4fb2-a0ef-265bded0b77f", TAKER_SWAP_V2_TYPE), (2, "2f9afe84-7a89-4194-8947-45fba563118f", MAKER_SWAP_V2_TYPE), @@ -445,7 
+452,7 @@ mod wasm_tests { page_number: NonZeroUsize::new(10).unwrap(), from_uuid: Some(Uuid::parse_str("8f5b267a-efa8-49d6-a92d-ec0523cca891").unwrap()), }; - let actual = take_according_to_paging_opts(uuids.clone(), &paging).unwrap(); + let actual = take_according_to_paging_opts(uuids.clone(), &paging, &pubkey).unwrap(); let expected = MyRecentSwapsUuids { uuids_and_types: vec![ ( @@ -459,6 +466,7 @@ mod wasm_tests { ], total_count: uuids.len(), skipped: 6, + pubkey: pubkey.clone(), }; assert_eq!(actual, expected); @@ -467,7 +475,7 @@ mod wasm_tests { page_number: NonZeroUsize::new(2).unwrap(), from_uuid: None, }; - let actual = take_according_to_paging_opts(uuids.clone(), &paging).unwrap(); + let actual = take_according_to_paging_opts(uuids.clone(), &paging, &pubkey).unwrap(); let expected = MyRecentSwapsUuids { uuids_and_types: vec![ ( @@ -485,6 +493,7 @@ mod wasm_tests { ], total_count: uuids.len(), skipped: 3, + pubkey: pubkey.clone(), }; assert_eq!(actual, expected); @@ -497,7 +506,7 @@ mod wasm_tests { // unknown UUID from_uuid: Some(from_uuid), }; - let actual = take_according_to_paging_opts(uuids, &paging) + let actual = take_according_to_paging_opts(uuids, &paging, &pubkey) .expect_err("'take_according_to_paging_opts' must return an error"); assert_eq!(actual.into_inner(), MySwapsError::FromUuidNotFound(from_uuid)); } diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 096a7d017c..8d7abd1f44 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -37,7 +37,7 @@ pub struct ActiveSwapV2Info { } /// DB representation of tx preimage with signature -#[derive(Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct StoredTxPreimage { pub preimage: BytesJson, pub signature: BytesJson, diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index 3635c54c76..d361189be4 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -5,6 +5,7 @@ use super::taker_swap::TakerSavedSwap; use super::taker_swap_v2::TakerSwapEvent; use super::{active_swaps, MySwapsFilter, SavedSwap, SavedSwapError, SavedSwapIo, LEGACY_SWAP_TYPE, MAKER_SWAP_V2_TYPE, TAKER_SWAP_V2_TYPE}; +use coins::find_unique_active_account_ids; use common::log::{error, warn}; use common::{calc_total_pages, HttpStatusCode, PagingOptions}; use derive_more::Display; @@ -95,7 +96,7 @@ pub(super) async fn get_swap_type( } /// Represents data of the swap used for RPC, omits fields that should be kept in secret -#[derive(Debug, Serialize)] +#[derive(Clone, Debug, Serialize)] pub(crate) struct MySwapForRpc { my_coin: String, other_coin: String, @@ -270,7 +271,7 @@ pub(super) async fn get_taker_swap_data_for_rpc( })) } -#[derive(Serialize)] +#[derive(Clone, Serialize)] #[serde(tag = "swap_type", content = "swap_data")] pub(crate) enum SwapRpcData { MakerV1(MakerSavedSwap), @@ -389,7 +390,13 @@ pub(crate) struct MyRecentSwapsRequest { pub filter: MySwapsFilter, } -#[derive(Serialize)] +#[derive(Clone, Serialize)] +pub(crate) struct MyRecentSwapsMultiResponse { + swaps: MyRecentSwapsResponse, + pubkey: String, +} + +#[derive(Clone, Serialize)] pub(crate) struct MyRecentSwapsResponse { swaps: Vec, from_uuid: Option, @@ -407,6 +414,8 @@ pub(crate) enum MyRecentSwapsErr { FromUuidSwapNotFound(Uuid), InvalidTimeStampRange, DbError(String), + #[display(fmt = "No active coin pubkey not found")] + 
CoinNotFound, } impl From for MyRecentSwapsErr { @@ -422,9 +431,9 @@ impl From for MyRecentSwapsErr { impl HttpStatusCode for MyRecentSwapsErr { fn status_code(&self) -> StatusCode { match self { - MyRecentSwapsErr::FromUuidSwapNotFound(_) | MyRecentSwapsErr::InvalidTimeStampRange => { - StatusCode::BAD_REQUEST - }, + MyRecentSwapsErr::FromUuidSwapNotFound(_) + | MyRecentSwapsErr::InvalidTimeStampRange + | MyRecentSwapsErr::CoinNotFound => StatusCode::BAD_REQUEST, MyRecentSwapsErr::DbError(_) => StatusCode::INTERNAL_SERVER_ERROR, } } @@ -433,30 +442,41 @@ impl HttpStatusCode for MyRecentSwapsErr { pub(crate) async fn my_recent_swaps_rpc( ctx: MmArc, req: MyRecentSwapsRequest, -) -> MmResult { - // TODO: db_id - let db_result = MySwapsStorage::new(ctx.clone()) - .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options), None) - .await?; - let mut swaps = Vec::with_capacity(db_result.uuids_and_types.len()); - for (uuid, swap_type) in db_result.uuids_and_types.iter() { - match get_swap_data_by_uuid_and_type(&ctx, None, *uuid, *swap_type).await { - Ok(Some(data)) => swaps.push(data), - Ok(None) => warn!("Swap {} data doesn't exist in DB", uuid), - Err(e) => error!("Error {} while trying to get swap {} data", e, uuid), - }; +) -> MmResult, MyRecentSwapsErr> { + let db_ids = find_unique_active_account_ids(&ctx) + .await + .map_to_mm(|_| MyRecentSwapsErr::CoinNotFound)?; + + let mut db_results = vec![]; + for db_id in db_ids { + let db_result = MySwapsStorage::new(ctx.clone()) + .my_recent_swaps_with_filters(&req.filter, Some(&req.paging_options), &db_id) + .await?; + let mut swaps = Vec::with_capacity(db_result.uuids_and_types.len()); + for (uuid, swap_type) in db_result.uuids_and_types.iter() { + match get_swap_data_by_uuid_and_type(&ctx, Some(&db_id), *uuid, *swap_type).await { + Ok(Some(data)) => swaps.push(data), + Ok(None) => warn!("Swap {} data doesn't exist in DB", uuid), + Err(e) => error!("Error {} while trying to get swap {} data", e, uuid), + }; + + db_results.push(MyRecentSwapsMultiResponse { + swaps: MyRecentSwapsResponse { + swaps: swaps.clone(), + from_uuid: req.paging_options.from_uuid, + skipped: db_result.skipped, + limit: req.paging_options.limit, + total: db_result.total_count, + page_number: req.paging_options.page_number, + total_pages: calc_total_pages(db_result.total_count, req.paging_options.limit), + found_records: db_result.uuids_and_types.len(), + }, + pubkey: db_id.to_string(), + }) + } } - Ok(MyRecentSwapsResponse { - swaps, - from_uuid: req.paging_options.from_uuid, - skipped: db_result.skipped, - limit: req.paging_options.limit, - total: db_result.total_count, - page_number: req.paging_options.page_number, - total_pages: calc_total_pages(db_result.total_count, req.paging_options.limit), - found_records: db_result.uuids_and_types.len(), - }) + Ok(db_results) } #[derive(Deserialize)] diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 0bc0364261..088c48e93d 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -152,7 +152,7 @@ async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSav } } -#[derive(Debug, Deserialize, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct TakerSavedEvent { pub timestamp: u64, pub event: TakerSwapEvent, @@ -197,7 +197,7 @@ impl TakerSavedEvent { } } -#[derive(Debug, Deserialize, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] 
pub struct TakerSavedSwap { pub uuid: Uuid, pub my_order_uuid: Option, diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 94ee88a506..84ccbee2ad 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -46,7 +46,7 @@ cfg_wasm32!( #[allow(unused_imports)] use prost::Message; /// Negotiation data representation to be stored in DB. -#[derive(Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct StoredNegotiationData { maker_payment_locktime: u64, maker_secret_hash: BytesJson, @@ -58,7 +58,7 @@ pub struct StoredNegotiationData { } /// Represents events produced by taker swap states. -#[derive(Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] #[serde(tag = "event_type", content = "event_data")] pub enum TakerSwapEvent { /// Swap has been successfully initialized. @@ -163,7 +163,7 @@ pub enum TakerSwapEvent { #[derive(Clone)] pub struct TakerSwapStorage { ctx: MmArc, - pub db_id: Option, + db_id: Option, } impl TakerSwapStorage { From 23f0566b09b3241af05f4382b2625f5f3108c4de Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 12 Apr 2024 04:29:45 +0100 Subject: [PATCH 024/186] fix field assignment outside of initializer --- mm2src/mm2_main/src/database/my_swaps.rs | 10 ++++++---- mm2src/mm2_main/src/lp_swap.rs | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 2c58fe0935..eaae73475a 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -217,10 +217,12 @@ pub fn select_uuids_by_my_swaps_filter( let total_count: isize = conn.query_row_named(&count_query, params_as_trait.as_slice(), |row| row.get(0))?; let total_count = total_count.try_into().expect("COUNT should always be >= 0"); if total_count == 0 { - let mut default = MyRecentSwapsUuids::default(); - default.pubkey = db_id.to_string(); - drop_mutability!(default); - return Ok(default); + return Ok(MyRecentSwapsUuids { + pubkey: db_id.to_string(), + uuids_and_types: vec![], + skipped: 0, + total_count: 0, + }); } // query the uuids and types finally diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 1f65bc9920..95e8d28239 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1257,7 +1257,7 @@ pub struct MyRecentSwapsReq { #[derive(Debug, Default, PartialEq)] pub struct MyRecentSwapsUuids { - /// Pubkey i which swaps belongs to. + /// Pubkey which swaps belongs to. 
pub pubkey: String, /// UUIDs and types of swaps matching the query pub uuids_and_types: Vec<(Uuid, u8)>, From 65a07d2a2145a16d5663c6565bab3fe489ab5ee5 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 12 Apr 2024 08:51:55 +0100 Subject: [PATCH 025/186] WIP - uxto_block_header_storage --- .../utxo/utxo_block_header_storage/mod.rs | 32 +++++++++++++------ .../wasm/indexeddb_block_header_storage.rs | 4 +-- .../utxo/utxo_builder/utxo_coin_builder.rs | 17 +++++++--- .../mm2_main/src/lp_swap/my_swaps_storage.rs | 2 +- 4 files changed, 38 insertions(+), 17 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 89266af2f6..d74fa547b6 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -26,7 +26,11 @@ impl Debug for BlockHeaderStorage { impl BlockHeaderStorage { #[cfg(all(not(test), not(target_arch = "wasm32")))] - pub(crate) fn new_from_ctx(ctx: MmArc, ticker: String) -> Result { + pub(crate) fn new_from_ctx( + ctx: MmArc, + ticker: String, + _db_id: Option<&str>, + ) -> Result { let sqlite_connection = ctx.sqlite_connection.ok_or(BlockHeaderStorageError::Internal( "sqlite_connection is not initialized".to_owned(), ))?; @@ -39,14 +43,22 @@ impl BlockHeaderStorage { } #[cfg(target_arch = "wasm32")] - pub(crate) fn new_from_ctx(ctx: MmArc, ticker: String) -> Result { + pub(crate) fn new_from_ctx( + ctx: MmArc, + ticker: String, + db_id: Option<&str>, + ) -> Result { Ok(BlockHeaderStorage { - inner: Box::new(IDBBlockHeadersStorage::new(&ctx, ticker)), + inner: Box::new(IDBBlockHeadersStorage::new(&ctx, ticker, db_id)), }) } #[cfg(all(test, not(target_arch = "wasm32")))] - pub(crate) fn new_from_ctx(ctx: MmArc, ticker: String) -> Result { + pub(crate) fn new_from_ctx( + ctx: MmArc, + ticker: String, + _db_id: Option<&str>, + ) -> Result { use db_common::sqlite::rusqlite::Connection; use std::sync::{Arc, Mutex}; @@ -132,7 +144,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_add_block_headers_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string()) + let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -146,7 +158,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_get_block_header_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string()) + let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -171,7 +183,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_get_last_block_header_with_non_max_bits_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string()) + let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -206,7 +218,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_get_last_block_height_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string()) + let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -234,7 +246,7 @@ mod block_headers_storage_tests { 
pub(crate) async fn test_remove_headers_from_storage_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string()) + let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -338,7 +350,7 @@ mod wasm_test { #[wasm_bindgen_test] async fn test_storage_init() { let ctx = mm_ctx_with_custom_db(); - let storage = IDBBlockHeadersStorage::new(&ctx, "RICK".to_string()); + let storage = IDBBlockHeadersStorage::new(&ctx, "RICK".to_string(), None); register_wasm_log(); diff --git a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs index b0be84ac7e..60fc3e02f4 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs @@ -47,9 +47,9 @@ pub struct IDBBlockHeadersStorage { } impl IDBBlockHeadersStorage { - pub fn new(ctx: &MmArc, ticker: String) -> Self { + pub fn new(ctx: &MmArc, ticker: String, db_id: Option<&str>) -> Self { Self { - db: ConstructibleDb::new(ctx, None).into_shared(), + db: ConstructibleDb::new(ctx, db_id).into_shared(), ticker, } } diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index dcd6ca10ee..2a14b76873 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -9,8 +9,9 @@ use crate::utxo::{output_script, utxo_common, ElectrumBuilderArgs, ElectrumProto RecentlySpentOutPoints, ScripthashNotification, ScripthashNotificationSender, TxFee, UtxoCoinConf, UtxoCoinFields, UtxoHDAccount, UtxoHDWallet, UtxoRpcMode, UtxoSyncStatus, UtxoSyncStatusLoopHandle, DEFAULT_GAP_LIMIT, UTXO_DUST_AMOUNT}; -use crate::{BlockchainNetwork, CoinTransportMetrics, DerivationMethod, HistorySyncState, IguanaPrivKey, - PrivKeyBuildPolicy, PrivKeyPolicy, PrivKeyPolicyNotAllowed, RpcClientType, UtxoActivationParams}; +use crate::{lp_coinfind_any, BlockchainNetwork, CoinTransportMetrics, DerivationMethod, HistorySyncState, + IguanaPrivKey, PrivKeyBuildPolicy, PrivKeyPolicy, PrivKeyPolicyNotAllowed, RpcClientType, + UtxoActivationParams}; use async_trait::async_trait; use chain::TxHashAlgo; use common::custom_futures::repeatable::{Ready, Retry}; @@ -571,9 +572,17 @@ pub trait UtxoCoinBuilderCommonOps { event_handlers.push(ElectrumProtoVerifier { on_event_tx }.into_shared()); } + let db_id = match lp_coinfind_any(ctx, self.ticker()) + .await + .map_to_mm(UtxoCoinBuildError::Internal)? + { + Some(coin) => coin.inner.account_db_id().map_to_mm(UtxoCoinBuildError::Internal)?, + None => None, + }; let storage_ticker = self.ticker().replace('-', "_"); - let block_headers_storage = BlockHeaderStorage::new_from_ctx(self.ctx().clone(), storage_ticker) - .map_to_mm(|e| UtxoCoinBuildError::Internal(e.to_string()))?; + let block_headers_storage = + BlockHeaderStorage::new_from_ctx(self.ctx().clone(), storage_ticker, db_id.as_deref()) + .map_to_mm(|e| UtxoCoinBuildError::Internal(e.to_string()))?; if !block_headers_storage.is_initialized_for().await? 
{ block_headers_storage.init().await?; } diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 77f283c2df..174bc870ce 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -206,7 +206,7 @@ mod wasm_impl { paging_options: Option<&PagingOptions>, db_id: &str, ) -> MySwapsResult { - let swap_ctx = SwapsContext::from_ctx(&self.ctx, Some(&db_id)).map_to_mm(MySwapsError::InternalError)?; + let swap_ctx = SwapsContext::from_ctx(&self.ctx, Some(db_id)).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db().await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; From 71ae0b11275b4426e80187d0ee6af0b368785a71 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 12 Apr 2024 19:23:02 +0100 Subject: [PATCH 026/186] wip sqlite --- mm2src/coins/lp_coins.rs | 28 ++++++++++++ .../utxo/utxo_block_header_storage/mod.rs | 2 +- mm2src/mm2_main/src/database.rs | 44 +++++++++++-------- mm2src/mm2_main/src/lp_native_dex.rs | 33 +++++++------- mm2src/mm2_main/src/lp_swap.rs | 4 +- 5 files changed, 74 insertions(+), 37 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index e5945e6dd9..af77662742 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4158,6 +4158,34 @@ pub async fn find_unique_active_account_ids(ctx: &MmArc) -> Result Result, String> { + let mut account_ids = HashSet::new(); + account_ids.insert(hex::encode(ctx.rmd160().as_slice())); + Ok(account_ids) +} + +#[cfg(target_arch = "wasm32")] +pub async fn find_unique_account_ids(ctx: &MmArc) -> Result, String> { + let cctx = try_s!(CoinsContext::from_ctx(ctx)); + let coins = cctx.coins.lock().await; + let coins = coins.values().collect::>(); + + // Using a HashSet to ensure uniqueness efficiently + let mut account_ids = HashSet::new(); + // Add default wallet pubkey + account_ids.insert(hex::encode(ctx.rmd160().as_slice())); + + for coin in coins { + if let Some(account) = try_s!(coin.inner.account_db_id()) { + account_ids.insert(account); + }; + } + + Ok(account_ids) +} + #[cfg(not(target_arch = "wasm32"))] // TODO: complete impl when implementing multikey support for sqlite/native pub async fn find_unique_active_account_ids(ctx: &MmArc) -> Result, String> { diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index d74fa547b6..29a65948f1 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -300,7 +300,7 @@ mod native_tests { fn test_init_collection() { let for_coin = "init_collection"; let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string()) + let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) .unwrap() .into_inner(); diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 4ffe035546..8aaa2fe4a2 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -7,6 +7,7 @@ pub mod my_orders; #[path = "database/stats_swaps.rs"] pub mod stats_swaps; use crate::CREATE_MY_SWAPS_TABLE; +use coins::find_unique_account_ids; use common::log::{debug, error, info}; use db_common::sqlite::run_optimization_pragmas; use db_common::sqlite::rusqlite::{params_from_iter, Result as SqlResult}; @@ -22,7 +23,7 @@ fn get_current_migration(ctx: &MmArc) -> SqlResult { 
conn.query_row(SELECT_MIGRATION, [], |row| row.get(0)) } -pub async fn init_and_migrate_sql_db(ctx: &MmArc) -> SqlResult<()> { +pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> { info!("Checking the current SQLite migration"); match get_current_migration(ctx) { Ok(current_migration) => { @@ -43,13 +44,13 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc) -> SqlResult<()> { info!("Trying to initialize the SQLite database"); - init_db(ctx)?; + init_db(ctx, db_id)?; migrate_sqlite_database(ctx, 1).await?; info!("SQLite database initialization is successful"); Ok(()) } -fn init_db(ctx: &MmArc) -> SqlResult<()> { +fn init_db(ctx: &MmArc, _db_id: Option<&str>) -> SqlResult<()> { let conn = ctx.sqlite_connection(); run_optimization_pragmas(&conn)?; let init_batch = concat!( @@ -140,23 +141,28 @@ async fn statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option } } -pub async fn migrate_sqlite_database(ctx: &MmArc, mut current_migration: i64) -> SqlResult<()> { - info!("migrate_sqlite_database, current migration {}", current_migration); - while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { - // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, - // so we can't create a transaction outside of this loop. - let conn = ctx.sqlite_connection(); - let transaction = conn.unchecked_transaction()?; - for (statement, params) in statements_with_params { - debug!("Executing SQL statement {:?} with params {:?}", statement, params); - transaction.execute(statement, params_from_iter(params.iter()))?; +pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64) -> SqlResult<()> { + let db_ids = find_unique_account_ids(ctx).await.expect("successful coin find"); + for db_id in db_ids { + let mut current_migration = current_migration; + info!("migrate_sqlite_database for db_id=({db_id}), current migration {current_migration}"); + while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { + // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, + // so we can't create a transaction outside of this loop. 
+ let conn = ctx.sqlite_connection(); + let transaction = conn.unchecked_transaction()?; + for (statement, params) in statements_with_params { + debug!("Executing SQL statement {statement:?} with params {params:?} for db_id: {db_id}"); + transaction.execute(statement, params_from_iter(params.iter()))?; + } + current_migration += 1; + transaction.execute("INSERT INTO migration (current_migration) VALUES (?1);", [ + current_migration, + ])?; + transaction.commit()?; } - current_migration += 1; - transaction.execute("INSERT INTO migration (current_migration) VALUES (?1);", [ - current_migration, - ])?; - transaction.commit()?; + info!("migrate_sqlite_database complete for db_id=({db_id}), migrated to {current_migration}"); } - info!("migrate_sqlite_database complete, migrated to {}", current_migration); + Ok(()) } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 7f7714d3c5..00bc08b9d5 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -19,7 +19,7 @@ // use bitcrypto::sha256; -use coins::register_balance_update_handler; +use coins::{find_unique_account_ids, register_balance_update_handler}; use common::executor::{SpawnFuture, Timer}; use common::log::{info, warn}; use crypto::{from_hw_error, CryptoCtx, HwError, HwProcessingError, HwRpcError, WithHwRpcError}; @@ -331,10 +331,10 @@ fn default_seednodes(netid: u16) -> Vec { } #[cfg(not(target_arch = "wasm32"))] -pub fn fix_directories(ctx: &MmCtx) -> MmInitResult<()> { +pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>) -> MmInitResult<()> { fix_shared_dbdir(ctx)?; - let dbdir = ctx.dbdir(None); + let dbdir = ctx.dbdir(db_id); fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { path: dbdir.clone(), error: e.to_string(), @@ -403,8 +403,8 @@ fn fix_shared_dbdir(ctx: &MmCtx) -> MmInitResult<()> { } #[cfg(not(target_arch = "wasm32"))] -fn migrate_db(ctx: &MmArc) -> MmInitResult<()> { - let migration_num_path = ctx.dbdir(None).join(".migration"); +fn migrate_db(ctx: &MmArc, db_id: Option<&str>) -> MmInitResult<()> { + let migration_num_path = ctx.dbdir(db_id).join(".migration"); let mut current_migration = match std::fs::read(&migration_num_path) { Ok(bytes) => { let mut num_bytes = [0; 8]; @@ -462,16 +462,19 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - fix_directories(&ctx)?; - ctx.init_sqlite_connection(None) - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_shared_sqlite_conn() - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_async_sqlite_connection(None) - .await - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - init_and_migrate_sql_db(&ctx).await?; - migrate_db(&ctx)?; + let db_ids = find_unique_account_ids(&ctx).await.map_to_mm(MmInitError::Internal)?; + for db_id in db_ids.iter() { + fix_directories(&ctx, Some(db_id))?; + ctx.init_sqlite_connection(None) + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + ctx.init_shared_sqlite_conn() + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + ctx.init_async_sqlite_connection(None) + .await + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + init_and_migrate_sql_db(&ctx, Some(db_id)).await?; + migrate_db(&ctx, Some(db_id))?; + } } init_message_service(&ctx).await?; diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 95e8d28239..1a608d52f4 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -2325,7 +2325,7 @@ mod 
lp_swap_tests { .unwrap() .mm2_internal_key_pair(); - fix_directories(&maker_ctx).unwrap(); + fix_directories(&maker_ctx, None).unwrap(); block_on(init_p2p(maker_ctx.clone())).unwrap(); maker_ctx.init_sqlite_connection(None).unwrap(); @@ -2363,7 +2363,7 @@ mod lp_swap_tests { .unwrap() .mm2_internal_key_pair(); - fix_directories(&taker_ctx).unwrap(); + fix_directories(&taker_ctx, None).unwrap(); block_on(init_p2p(taker_ctx.clone())).unwrap(); taker_ctx.init_sqlite_connection(None).unwrap(); From 61c49bb3d9e127013923c6f38dae0decc428fa02 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 13 Apr 2024 21:25:38 +0100 Subject: [PATCH 027/186] merge find_unique_account_ids and find_unique_active_account_ids --- mm2src/coins/lp_coins.rs | 54 ++++++++------------- mm2src/mm2_core/src/mm_ctx.rs | 6 +-- mm2src/mm2_main/src/database.rs | 4 +- mm2src/mm2_main/src/lp_native_dex.rs | 4 +- mm2src/mm2_main/src/lp_swap.rs | 12 ++--- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 4 +- 6 files changed, 36 insertions(+), 48 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index af77662742..d222c4b379 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4128,6 +4128,11 @@ pub async fn lp_coinfind(ctx: &MmArc, ticker: &str) -> Result Ok(None) } +pub enum UniqueAccountIdKind { + Active, + ActivePassive, +} + /// Returns coins even if they are on the passive mode pub async fn lp_coinfind_any(ctx: &MmArc, ticker: &str) -> Result, String> { let cctx = try_s!(CoinsContext::from_ctx(ctx)); @@ -4136,38 +4141,16 @@ pub async fn lp_coinfind_any(ctx: &MmArc, ticker: &str) -> Result Result, String> { - let cctx = try_s!(CoinsContext::from_ctx(ctx)); - let coins = cctx.coins.lock().await; - let coins = coins.values().collect::>(); - - // Using a HashSet to ensure uniqueness efficiently - let mut account_ids = HashSet::new(); - // Add default wallet pubkey - account_ids.insert(hex::encode(ctx.rmd160().as_slice())); - - for coin in coins { - if let Some(account) = try_s!(coin.inner.account_db_id()) { - if coin.is_available() { - account_ids.insert(account); - } - }; - } - - Ok(account_ids) -} - #[cfg(not(target_arch = "wasm32"))] // TODO: complete impl when implementing multikey support for sqlite/native -pub async fn find_unique_account_ids(ctx: &MmArc) -> Result, String> { +pub async fn find_unique_account_ids(ctx: &MmArc, _kind: UniqueAccountIdKind) -> Result, String> { let mut account_ids = HashSet::new(); - account_ids.insert(hex::encode(ctx.rmd160().as_slice())); + account_ids.insert(ctx.rmd160_hex()); Ok(account_ids) } #[cfg(target_arch = "wasm32")] -pub async fn find_unique_account_ids(ctx: &MmArc) -> Result, String> { +pub async fn find_unique_account_ids(ctx: &MmArc, kind: UniqueAccountIdKind) -> Result, String> { let cctx = try_s!(CoinsContext::from_ctx(ctx)); let coins = cctx.coins.lock().await; let coins = coins.values().collect::>(); @@ -4175,25 +4158,26 @@ pub async fn find_unique_account_ids(ctx: &MmArc) -> Result, Str // Using a HashSet to ensure uniqueness efficiently let mut account_ids = HashSet::new(); // Add default wallet pubkey - account_ids.insert(hex::encode(ctx.rmd160().as_slice())); + account_ids.insert(ctx.rmd160_hex()); for coin in coins { if let Some(account) = try_s!(coin.inner.account_db_id()) { - account_ids.insert(account); + match kind { + UniqueAccountIdKind::ActivePassive => { + account_ids.insert(account); + }, + UniqueAccountIdKind::Active => { + if coin.is_available() { + account_ids.insert(account); + }; + }, + } }; } Ok(account_ids) 
} -#[cfg(not(target_arch = "wasm32"))] -// TODO: complete impl when implementing multikey support for sqlite/native -pub async fn find_unique_active_account_ids(ctx: &MmArc) -> Result, String> { - let mut account_ids = HashSet::new(); - account_ids.insert(hex::encode(ctx.rmd160().as_slice())); - Ok(account_ids) -} - /// Attempts to find a pair of active coins returning None if one is not enabled pub async fn find_pair(ctx: &MmArc, base: &str, rel: &str) -> Result, String> { let fut_base = lp_coinfind(ctx, base); diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 2f98667bbc..8383db5822 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -199,6 +199,8 @@ impl MmCtx { self.rmd160.or(&|| &*DEFAULT) } + pub fn rmd160_hex(&self) -> String { hex::encode(self.rmd160().as_slice()) } + pub fn shared_db_id(&self) -> &H160 { lazy_static! { static ref DEFAULT: H160 = [0; 20].into(); @@ -302,9 +304,7 @@ impl MmCtx { /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. #[cfg(not(target_arch = "wasm32"))] pub fn dbdir(&self, db_id: Option<&str>) -> PathBuf { - let db_id = db_id - .map(|t| t.to_owned()) - .unwrap_or_else(|| hex::encode(self.rmd160().as_slice())); + let db_id = db_id.map(|t| t.to_owned()).unwrap_or_else(|| self.rmd160_hex()); path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) } diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 8aaa2fe4a2..08f06a131e 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -142,7 +142,9 @@ async fn statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option } pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64) -> SqlResult<()> { - let db_ids = find_unique_account_ids(ctx).await.expect("successful coin find"); + let db_ids = find_unique_account_ids(ctx, coins::UniqueAccountIdKind::ActivePassive) + .await + .expect("successful coin find"); for db_id in db_ids { let mut current_migration = current_migration; info!("migrate_sqlite_database for db_id=({db_id}), current migration {current_migration}"); diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 00bc08b9d5..de387884ba 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -462,7 +462,9 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - let db_ids = find_unique_account_ids(&ctx).await.map_to_mm(MmInitError::Internal)?; + let db_ids = find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::ActivePassive) + .await + .map_to_mm(MmInitError::Internal)?; for db_id in db_ids.iter() { fix_directories(&ctx, Some(db_id))?; ctx.init_sqlite_connection(None) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 1a608d52f4..04096922a2 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -62,8 +62,8 @@ use crate::mm2::lp_network::{broadcast_p2p_msg, Libp2pPeerId, P2PProcessError, P use crate::mm2::lp_swap::maker_swap_v2::{MakerSwapStateMachine, MakerSwapStorage}; use crate::mm2::lp_swap::taker_swap_v2::{TakerSwapStateMachine, TakerSwapStorage}; use bitcrypto::{dhash160, sha256}; -use coins::{find_unique_active_account_ids, lp_coinfind, lp_coinfind_or_err, CoinFindError, DexFee, MmCoin, - MmCoinEnum, TradeFee, TransactionEnum}; +use coins::{find_unique_account_ids, lp_coinfind, lp_coinfind_or_err, CoinFindError, DexFee, MmCoin, 
MmCoinEnum, + TradeFee, TransactionEnum}; use common::log::{debug, warn}; use common::now_sec; use common::time_cache::DuplicateCache; @@ -1219,7 +1219,7 @@ pub struct MySwapsFilter { /// Returns *all* uuids of swaps, which match the selected filter. pub async fn all_swaps_uuids_by_filter(ctx: MmArc, req: Json) -> Result>, String> { let filter: MySwapsFilter = try_s!(json::from_value(req)); - let db_ids = try_s!(find_unique_active_account_ids(&ctx).await); + let db_ids = try_s!(find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active).await); let mut res_js = vec![]; for db_id in db_ids { @@ -1296,7 +1296,7 @@ pub async fn latest_swaps_for_pair( other_coin: String, limit: usize, ) -> Result, MmError> { - let db_ids = find_unique_active_account_ids(&ctx) + let db_ids = find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active) .await .map_to_mm(|_| LatestSwapsErr::CoinNotFound)?; let mut swaps = vec![]; @@ -1342,7 +1342,7 @@ pub async fn latest_swaps_for_pair( /// Returns the data of recent swaps of `my` node. pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: MyRecentSwapsReq = try_s!(json::from_value(req)); - let db_ids = try_s!(find_unique_active_account_ids(&ctx).await); + let db_ids = try_s!(find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active).await); let mut res_js = vec![]; for db_id in db_ids { @@ -1403,7 +1403,7 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result Result, String> { - let db_ids = try_s!(find_unique_active_account_ids(&ctx).await); + let db_ids = try_s!(find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active).await); let mut coins = HashSet::new(); for db_id in db_ids { diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index d361189be4..9bf5824fa7 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -5,7 +5,7 @@ use super::taker_swap::TakerSavedSwap; use super::taker_swap_v2::TakerSwapEvent; use super::{active_swaps, MySwapsFilter, SavedSwap, SavedSwapError, SavedSwapIo, LEGACY_SWAP_TYPE, MAKER_SWAP_V2_TYPE, TAKER_SWAP_V2_TYPE}; -use coins::find_unique_active_account_ids; +use coins::find_unique_account_ids; use common::log::{error, warn}; use common::{calc_total_pages, HttpStatusCode, PagingOptions}; use derive_more::Display; @@ -443,7 +443,7 @@ pub(crate) async fn my_recent_swaps_rpc( ctx: MmArc, req: MyRecentSwapsRequest, ) -> MmResult, MyRecentSwapsErr> { - let db_ids = find_unique_active_account_ids(&ctx) + let db_ids = find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active) .await .map_to_mm(|_| MyRecentSwapsErr::CoinNotFound)?; From bd114fc894e424cb8871d1e3a09e31fbfa190e7e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sun, 14 Apr 2024 19:46:08 +0100 Subject: [PATCH 028/186] refactor find_unique_account_ids --- mm2src/coins/lp_coins.rs | 45 ++++++++++----------- mm2src/mm2_main/src/database.rs | 6 +-- mm2src/mm2_main/src/lp_native_dex.rs | 4 +- mm2src/mm2_main/src/lp_swap.rs | 12 +++--- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 4 +- 5 files changed, 33 insertions(+), 38 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 20b01892aa..96707d651e 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4193,11 +4193,6 @@ pub async fn lp_coinfind(ctx: &MmArc, ticker: &str) -> Result Ok(None) } -pub enum UniqueAccountIdKind { - Active, - ActivePassive, -} - /// Returns coins even if they are on the 
passive mode pub async fn lp_coinfind_any(ctx: &MmArc, ticker: &str) -> Result, String> { let cctx = try_s!(CoinsContext::from_ctx(ctx)); @@ -4206,18 +4201,22 @@ pub async fn lp_coinfind_any(ctx: &MmArc, ticker: &str) -> Result Result, String> { - let mut account_ids = HashSet::new(); - account_ids.insert(ctx.rmd160_hex()); - Ok(account_ids) +pub async fn find_unique_account_ids_any(ctx: &MmArc) -> Result, String> { + find_unique_account_ids(ctx, false).await } -#[cfg(target_arch = "wasm32")] -pub async fn find_unique_account_ids(ctx: &MmArc, kind: UniqueAccountIdKind) -> Result, String> { +pub async fn find_unique_account_ids_active(ctx: &MmArc) -> Result, String> { + find_unique_account_ids(ctx, true).await +} + +#[allow(unused)] +async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { + // TODO: removee target_arch after implementing native/sqlite + #[cfg(target_arch = "wasm32")] let cctx = try_s!(CoinsContext::from_ctx(ctx)); + #[cfg(target_arch = "wasm32")] let coins = cctx.coins.lock().await; + #[cfg(target_arch = "wasm32")] let coins = coins.values().collect::>(); // Using a HashSet to ensure uniqueness efficiently @@ -4225,21 +4224,19 @@ pub async fn find_unique_account_ids(ctx: &MmArc, kind: UniqueAccountIdKind) -> // Add default wallet pubkey account_ids.insert(ctx.rmd160_hex()); - for coin in coins { + // TODO: removee target_arch after implementing native/sqlite + #[cfg(target_arch = "wasm32")] + for coin in coins.iter() { if let Some(account) = try_s!(coin.inner.account_db_id()) { - match kind { - UniqueAccountIdKind::ActivePassive => { - account_ids.insert(account); - }, - UniqueAccountIdKind::Active => { - if coin.is_available() { - account_ids.insert(account); - }; - }, + if active_only && coin.is_available() { + account_ids.insert(account); + } else { + account_ids.insert(account); } - }; + } } + info!("{account_ids:?}"); Ok(account_ids) } diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 08f06a131e..0ea9d362bb 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -7,7 +7,7 @@ pub mod my_orders; #[path = "database/stats_swaps.rs"] pub mod stats_swaps; use crate::CREATE_MY_SWAPS_TABLE; -use coins::find_unique_account_ids; +use coins::find_unique_account_ids_any; use common::log::{debug, error, info}; use db_common::sqlite::run_optimization_pragmas; use db_common::sqlite::rusqlite::{params_from_iter, Result as SqlResult}; @@ -142,9 +142,7 @@ async fn statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option } pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64) -> SqlResult<()> { - let db_ids = find_unique_account_ids(ctx, coins::UniqueAccountIdKind::ActivePassive) - .await - .expect("successful coin find"); + let db_ids = find_unique_account_ids_any(ctx).await.expect("successful coin find"); for db_id in db_ids { let mut current_migration = current_migration; info!("migrate_sqlite_database for db_id=({db_id}), current migration {current_migration}"); diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index de387884ba..ec164f5a3d 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -19,7 +19,7 @@ // use bitcrypto::sha256; -use coins::{find_unique_account_ids, register_balance_update_handler}; +use coins::{find_unique_account_ids_any, register_balance_update_handler}; use common::executor::{SpawnFuture, Timer}; use common::log::{info, warn}; use 
crypto::{from_hw_error, CryptoCtx, HwError, HwProcessingError, HwRpcError, WithHwRpcError}; @@ -462,7 +462,7 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - let db_ids = find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::ActivePassive) + let db_ids = find_unique_account_ids_any(&ctx) .await .map_to_mm(MmInitError::Internal)?; for db_id in db_ids.iter() { diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 04096922a2..b0832c573d 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -62,8 +62,8 @@ use crate::mm2::lp_network::{broadcast_p2p_msg, Libp2pPeerId, P2PProcessError, P use crate::mm2::lp_swap::maker_swap_v2::{MakerSwapStateMachine, MakerSwapStorage}; use crate::mm2::lp_swap::taker_swap_v2::{TakerSwapStateMachine, TakerSwapStorage}; use bitcrypto::{dhash160, sha256}; -use coins::{find_unique_account_ids, lp_coinfind, lp_coinfind_or_err, CoinFindError, DexFee, MmCoin, MmCoinEnum, - TradeFee, TransactionEnum}; +use coins::{find_unique_account_ids_active, lp_coinfind, lp_coinfind_or_err, CoinFindError, DexFee, MmCoin, + MmCoinEnum, TradeFee, TransactionEnum}; use common::log::{debug, warn}; use common::now_sec; use common::time_cache::DuplicateCache; @@ -1219,7 +1219,7 @@ pub struct MySwapsFilter { /// Returns *all* uuids of swaps, which match the selected filter. pub async fn all_swaps_uuids_by_filter(ctx: MmArc, req: Json) -> Result>, String> { let filter: MySwapsFilter = try_s!(json::from_value(req)); - let db_ids = try_s!(find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active).await); + let db_ids = try_s!(find_unique_account_ids_active(&ctx).await); let mut res_js = vec![]; for db_id in db_ids { @@ -1296,7 +1296,7 @@ pub async fn latest_swaps_for_pair( other_coin: String, limit: usize, ) -> Result, MmError> { - let db_ids = find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active) + let db_ids = find_unique_account_ids_active(&ctx) .await .map_to_mm(|_| LatestSwapsErr::CoinNotFound)?; let mut swaps = vec![]; @@ -1342,7 +1342,7 @@ pub async fn latest_swaps_for_pair( /// Returns the data of recent swaps of `my` node. 
pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: MyRecentSwapsReq = try_s!(json::from_value(req)); - let db_ids = try_s!(find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active).await); + let db_ids = try_s!(find_unique_account_ids_active(&ctx).await); let mut res_js = vec![]; for db_id in db_ids { @@ -1403,7 +1403,7 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result Result, String> { - let db_ids = try_s!(find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active).await); + let db_ids = try_s!(find_unique_account_ids_active(&ctx).await); let mut coins = HashSet::new(); for db_id in db_ids { diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index 9bf5824fa7..dcbd72cc8e 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -5,7 +5,7 @@ use super::taker_swap::TakerSavedSwap; use super::taker_swap_v2::TakerSwapEvent; use super::{active_swaps, MySwapsFilter, SavedSwap, SavedSwapError, SavedSwapIo, LEGACY_SWAP_TYPE, MAKER_SWAP_V2_TYPE, TAKER_SWAP_V2_TYPE}; -use coins::find_unique_account_ids; +use coins::find_unique_account_ids_active; use common::log::{error, warn}; use common::{calc_total_pages, HttpStatusCode, PagingOptions}; use derive_more::Display; @@ -443,7 +443,7 @@ pub(crate) async fn my_recent_swaps_rpc( ctx: MmArc, req: MyRecentSwapsRequest, ) -> MmResult, MyRecentSwapsErr> { - let db_ids = find_unique_account_ids(&ctx, coins::UniqueAccountIdKind::Active) + let db_ids = find_unique_account_ids_active(&ctx) .await .map_to_mm(|_| MyRecentSwapsErr::CoinNotFound)?; From f58c80ab1361efc4848ad66b2858703167e65408 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sun, 14 Apr 2024 19:47:34 +0100 Subject: [PATCH 029/186] debug with logger --- mm2src/coins/lp_coins.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 96707d651e..927e501bb4 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4236,7 +4236,7 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result Date: Sun, 14 Apr 2024 20:29:35 +0100 Subject: [PATCH 030/186] wasm clipy --- mm2src/coins/lp_coins.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 927e501bb4..7b40434107 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4229,8 +4229,10 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result Date: Mon, 15 Apr 2024 06:40:47 +0100 Subject: [PATCH 031/186] WIP nft --- mm2src/coins/lp_coins.rs | 41 +++++++++++++++++++++++++++++++++++++++- mm2src/coins/nft.rs | 37 ++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 7b40434107..57a27617e2 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4209,8 +4209,47 @@ pub async fn find_unique_account_ids_active(ctx: &MmArc) -> Result Result, String> { + // Using a HashSet to ensure uniqueness efficiently + let mut account_ids = HashSet::new(); + // Add default wallet pubkey + account_ids.insert(ctx.rmd160_hex()); + + Ok(account_ids) +} + +#[cfg(target_arch = "wasm32")] async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { + // Using a HashSet to ensure uniqueness efficiently + let mut account_ids = HashSet::new(); + // Add default 
wallet pubkey + account_ids.insert(ctx.rmd160_hex()); + + let cctx = try_s!(CoinsContext::from_ctx(ctx)); + let coins = cctx.coins.lock().await; + let coins = coins.values().collect::>(); + + for coin in coins.iter() { + if let Some(account) = try_s!(coin.inner.account_db_id()) { + if active_only && coin.is_available() { + account_ids.insert(account.clone()); + continue; + }; + + if !active_only { + account_ids.insert(account); + continue; + } + } + } + + common::log::info!("coin account_ids=({account_ids:?})"); + Ok(account_ids) +} + +#[allow(unused)] +async fn find_unique_nft_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { // TODO: removee target_arch after implementing native/sqlite #[cfg(target_arch = "wasm32")] let cctx = try_s!(CoinsContext::from_ctx(ctx)); diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 9d16247a7a..76ae029109 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -90,14 +90,17 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult Result, String> { + find_unique_nft_account_ids(ctx, false).await +} + +pub async fn find_unique_nft_account_ids_active(ctx: &MmArc) -> Result, String> { + find_unique_nft_account_ids(ctx, true).await +} + +#[cfg(not(target_arch = "wasm32"))] +async fn find_unique_nft_account_ids(_ctx: &MmArc, _active_only: bool) -> Result, String> { todo!() } + +#[cfg(target_arch = "wasm32")] +async fn find_unique_nft_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { + // Using a HashSet to ensure uniqueness efficiently + let mut account_ids = HashSet::new(); + // Add default wallet pubkey + account_ids.insert(ctx.rmd160_hex()); + + let cctx = try_s!(CoinsContext::from_ctx(ctx)); + let coins = cctx.coins.lock().await; + let coins = coins.values().collect::>(); + + for coin in coins.iter() { + if let Some(account) = try_s!(coin.inner.account_db_id()) { + if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { + todo!() + }; + } + } + + common::log::info!("nft account_ids=({account_ids:?})"); + Ok(account_ids) +} From d9d8e53845ddc88cca5ca30b701102d98e1a1528 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 16 Apr 2024 20:13:18 +0100 Subject: [PATCH 032/186] use a single find_unique_nft_account_ids for all targets --- mm2src/coins/lp_coins.rs | 47 +++------------------------------------- mm2src/coins/nft.rs | 19 +++++++--------- 2 files changed, 11 insertions(+), 55 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 57a27617e2..9ed674308d 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4209,17 +4209,8 @@ pub async fn find_unique_account_ids_active(ctx: &MmArc) -> Result Result, String> { - // Using a HashSet to ensure uniqueness efficiently - let mut account_ids = HashSet::new(); - // Add default wallet pubkey - account_ids.insert(ctx.rmd160_hex()); - - Ok(account_ids) -} - -#[cfg(target_arch = "wasm32")] +// TODO: remove early return and cfg +#[allow(unused)] async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { // Using a HashSet to ensure uniqueness efficiently let mut account_ids = HashSet::new(); @@ -4230,6 +4221,7 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result>(); + #[cfg(not(target_arch = "wasm32"))] for coin in coins.iter() { if let Some(account) = try_s!(coin.inner.account_db_id()) { if active_only && coin.is_available() { @@ -4248,39 +4240,6 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result Result, String> { - // TODO: 
removee target_arch after implementing native/sqlite - #[cfg(target_arch = "wasm32")] - let cctx = try_s!(CoinsContext::from_ctx(ctx)); - #[cfg(target_arch = "wasm32")] - let coins = cctx.coins.lock().await; - #[cfg(target_arch = "wasm32")] - let coins = coins.values().collect::>(); - - // Using a HashSet to ensure uniqueness efficiently - let mut account_ids = HashSet::new(); - // Add default wallet pubkey - account_ids.insert(ctx.rmd160_hex()); - - // TODO: removee target_arch after implementing native/sqlite - #[cfg(target_arch = "wasm32")] - for coin in coins.iter() { - if let Some(account) = try_s!(coin.inner.account_db_id()) { - if active_only && coin.is_available() { - account_ids.insert(account.clone()); - }; - - if !active_only { - account_ids.insert(account); - } - } - } - - common::log::info!("{account_ids:?}"); - Ok(account_ids) -} - /// Attempts to find a pair of active coins returning None if one is not enabled pub async fn find_pair(ctx: &MmArc, base: &str, rel: &str) -> Result, String> { let fut_base = lp_coinfind(ctx, base); diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 76ae029109..0931a69cfd 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -1536,14 +1536,8 @@ pub async fn find_unique_nft_account_ids_any(ctx: &MmArc) -> Result Result, String> { - find_unique_nft_account_ids(ctx, true).await -} - -#[cfg(not(target_arch = "wasm32"))] -async fn find_unique_nft_account_ids(_ctx: &MmArc, _active_only: bool) -> Result, String> { todo!() } - -#[cfg(target_arch = "wasm32")] +// TODO: remove early return and cfg +#[allow(unused)] async fn find_unique_nft_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { // Using a HashSet to ensure uniqueness efficiently let mut account_ids = HashSet::new(); @@ -1554,11 +1548,14 @@ async fn find_unique_nft_account_ids(ctx: &MmArc, active_only: bool) -> Result>(); + #[cfg(not(target_arch = "wasm32"))] for coin in coins.iter() { if let Some(account) = try_s!(coin.inner.account_db_id()) { - if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { - todo!() - }; + if coin.is_available() { + if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { + account_ids.insert(account); + }; + } } } From 1538f81ee7cd45e5d4d61fb065cdf2ba2d9aa4b3 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 16 Apr 2024 20:24:25 +0100 Subject: [PATCH 033/186] make account_db_id return None by default --- mm2src/coins/lp_coins.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 9ed674308d..5503177ebb 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3031,20 +3031,8 @@ pub trait MmCoin: /// Loop collecting coin transaction history and saving it to local DB fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; - #[cfg(not(target_arch = "wasm32"))] fn account_db_id(&self) -> Result, String> { Ok(None) } - #[cfg(target_arch = "wasm32")] - fn account_db_id(&self) -> Result, String> { - Ok(Some( - try_s!(Public::from_slice( - try_s!(hex::decode(try_s!(self.get_public_key()))).as_slice() - )) - .address_hash() - .to_string(), - )) - } - /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] fn tx_history_path(&self, ctx: &MmArc) -> PathBuf { From c0355fa518d78506083f5e4c5bd160e19caab24e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 16 Apr 2024 22:25:09 +0100 Subject: [PATCH 034/186] WIP shared, async, sqlite connection manager impl --- mm2src/mm2_core/src/lib.rs | 1 + mm2src/mm2_core/src/mm_ctx.rs | 14 
+++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/mm2src/mm2_core/src/lib.rs b/mm2src/mm2_core/src/lib.rs index 3eb5ecc6ae..8d20fffd76 100644 --- a/mm2src/mm2_core/src/lib.rs +++ b/mm2src/mm2_core/src/lib.rs @@ -3,6 +3,7 @@ use rand::{thread_rng, Rng}; pub mod event_dispatcher; pub mod mm_ctx; +#[cfg(not(target_arch = "wasm32"))] pub mod sql_ctx; #[derive(Clone, Copy, Display, PartialEq)] pub enum DbNamespaceId { diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 8383db5822..4bd29fd6fe 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -140,6 +140,12 @@ pub struct MmCtx { /// asynchronous handle for rusqlite connection. #[cfg(not(target_arch = "wasm32"))] pub async_sqlite_connection: Constructible>>, + #[cfg(not(target_arch = "wasm32"))] + pub async_sqlite_connection_ctx: Mutex>>, + #[cfg(not(target_arch = "wasm32"))] + pub shared_sqlite_connection_ctx: Mutex>>, + #[cfg(not(target_arch = "wasm32"))] + pub sqlite_connection_ctx: Mutex>>, } impl MmCtx { @@ -189,6 +195,12 @@ impl MmCtx { nft_ctx: Mutex::new(None), #[cfg(not(target_arch = "wasm32"))] async_sqlite_connection: Constructible::default(), + #[cfg(not(target_arch = "wasm32"))] + async_sqlite_connection_ctx: Mutex::new(None), + #[cfg(not(target_arch = "wasm32"))] + sqlite_connection_ctx: Mutex::new(None), + #[cfg(not(target_arch = "wasm32"))] + shared_sqlite_connection_ctx: Mutex::new(None), } } @@ -773,7 +785,7 @@ impl MmCtxBuilder { } #[cfg(not(target_arch = "wasm32"))] -fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { +pub(super) fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); From a27ec3e558f2e5937e7bd0e13b12ee1cfd7ccfe5 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 16 Apr 2024 22:26:06 +0100 Subject: [PATCH 035/186] WIP sql_ctx --- mm2src/mm2_core/src/sql_ctx.rs | 121 +++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 mm2src/mm2_core/src/sql_ctx.rs diff --git a/mm2src/mm2_core/src/sql_ctx.rs b/mm2src/mm2_core/src/sql_ctx.rs new file mode 100644 index 0000000000..187482a974 --- /dev/null +++ b/mm2src/mm2_core/src/sql_ctx.rs @@ -0,0 +1,121 @@ +use std::{collections::HashMap, + sync::{Arc, Mutex}}; + +use db_common::{async_sql_conn::AsyncConnection, sqlite::rusqlite::Connection}; +use futures::lock::Mutex as AsyncMutex; +use gstuff::try_s; + +use crate::mm_ctx::{from_ctx, log_sqlite_file_open_attempt, MmArc}; + +pub struct AsyncSqlConnectionCtx { + connections: Arc>>>>, + ctx: MmArc, +} + +impl AsyncSqlConnectionCtx { + pub fn from_ctx(ctx: &MmArc) -> Result, String> { + let res = try_s!(from_ctx(&ctx.async_sqlite_connection_ctx, move || Ok(Self { + connections: Arc::new(AsyncMutex::new(HashMap::new())), + ctx: ctx.clone() + }))); + + Ok(res) + } + + pub async fn init(&self, db_id: Option<&str>) -> Result<(), String> { + let sqlite_file_path = self.ctx.dbdir(db_id).join("KOMODEFI.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); + + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + let mut connections = self.connections.lock().await; + connections.insert(db_id, Arc::new(AsyncMutex::new(async_conn))); + + Ok(()) + } +} + +pub struct SyncSqlConnectionCtx { + connections: Arc>>>>, + ctx: MmArc, +} + +impl SyncSqlConnectionCtx { + 
pub fn from_ctx(ctx: &MmArc) -> Result, String> { + let res = try_s!(from_ctx(&ctx.sqlite_connection_ctx, move || Ok(Self { + connections: Arc::new(Mutex::new(HashMap::new())), + ctx: ctx.clone() + }))); + + Ok(res) + } + + pub fn init(&self, db_id: Option<&str>) -> Result<(), String> { + let sqlite_file_path = self.ctx.dbdir(db_id).join("KOMODEFI.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + let connection = try_s!(Connection::open(sqlite_file_path)); + + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + let mut connections = self.connections.lock().unwrap(); + connections.insert(db_id, Arc::new(Mutex::new(connection))); + + Ok(()) + } + + pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + if let Ok(connections) = self.connections.lock() { + return connections.get(&db_id).cloned(); + }; + + None + } + + pub fn connection(&self, db_id: Option<&str>) -> Arc> { + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + let connection = self.connections.lock().unwrap(); + + connection + .get(&db_id) + .cloned() + .expect("sqlite_connection is not initialized") + } +} + +pub struct SharedSqlConnectionCtx { + connections: Arc>>>>, + ctx: MmArc, +} + +impl SharedSqlConnectionCtx { + pub fn from_ctx(ctx: &MmArc) -> Result, String> { + let res = try_s!(from_ctx(&ctx.shared_sqlite_connection_ctx, move || Ok(Self { + connections: Arc::new(Mutex::new(HashMap::new())), + ctx: ctx.clone() + }))); + + Ok(res) + } + + pub fn init(&self, db_id: Option<&str>) -> Result<(), String> { + let sqlite_file_path = self.ctx.dbdir(db_id).join("MM2-shared.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + let connection = try_s!(Connection::open(sqlite_file_path)); + + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + let mut connections = self.connections.lock().unwrap(); + connections.insert(db_id, Arc::new(Mutex::new(connection))); + + Ok(()) + } + + pub fn connection(&self, db_id: Option<&str>) -> Arc> { + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + let connection = self.connections.lock().unwrap(); + + connection + .get(&db_id) + .cloned() + .expect("sqlite_connection is not initialized") + } +} From 34078356435323f097d36afa9d0d16d3d5a6a263 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 17 Apr 2024 03:12:13 +0100 Subject: [PATCH 036/186] WIP sqlite connection manager --- .../utxo/utxo_block_header_storage/mod.rs | 15 ++-- mm2src/mm2_core/src/sql_ctx.rs | 79 +++++++++---------- mm2src/mm2_main/src/database/my_swaps.rs | 13 ++- 3 files changed, 56 insertions(+), 51 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 29a65948f1..f5f7c976f8 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -29,16 +29,15 @@ impl BlockHeaderStorage { pub(crate) fn new_from_ctx( ctx: MmArc, ticker: String, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> Result { - let sqlite_connection = ctx.sqlite_connection.ok_or(BlockHeaderStorageError::Internal( - "sqlite_connection is not initialized".to_owned(), - ))?; + use mm2_core::sql_ctx::SyncSqlConnectionCtx; + + let conn = SyncSqlConnectionCtx::from_ctx(&ctx, db_id) + .map_err(BlockHeaderStorageError::Internal)? 
+ .connection(db_id); Ok(BlockHeaderStorage { - inner: Box::new(SqliteBlockHeadersStorage { - ticker, - conn: sqlite_connection.clone(), - }), + inner: Box::new(SqliteBlockHeadersStorage { ticker, conn }), }) } diff --git a/mm2src/mm2_core/src/sql_ctx.rs b/mm2src/mm2_core/src/sql_ctx.rs index 187482a974..b71de3d9d4 100644 --- a/mm2src/mm2_core/src/sql_ctx.rs +++ b/mm2src/mm2_core/src/sql_ctx.rs @@ -13,11 +13,13 @@ pub struct AsyncSqlConnectionCtx { } impl AsyncSqlConnectionCtx { - pub fn from_ctx(ctx: &MmArc) -> Result, String> { - let res = try_s!(from_ctx(&ctx.async_sqlite_connection_ctx, move || Ok(Self { - connections: Arc::new(AsyncMutex::new(HashMap::new())), - ctx: ctx.clone() - }))); + pub async fn from_ctx(ctx: &MmArc) -> Result, String> { + let res = try_s!(from_ctx(&ctx.async_sqlite_connection_ctx, move || { + Ok(Self { + connections: Arc::new(AsyncMutex::new(HashMap::new())), + ctx: ctx.clone(), + }) + })); Ok(res) } @@ -35,34 +37,33 @@ impl AsyncSqlConnectionCtx { } } +#[derive(Clone)] pub struct SyncSqlConnectionCtx { connections: Arc>>>>, ctx: MmArc, } impl SyncSqlConnectionCtx { - pub fn from_ctx(ctx: &MmArc) -> Result, String> { - let res = try_s!(from_ctx(&ctx.sqlite_connection_ctx, move || Ok(Self { - connections: Arc::new(Mutex::new(HashMap::new())), - ctx: ctx.clone() - }))); + pub fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + let res = try_s!(from_ctx(&ctx.sqlite_connection_ctx, move || { + let sqlite_file_path = ctx.dbdir(db_id).join("KOMODEFI.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + let connection = try_s!(Connection::open(sqlite_file_path)); + + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| ctx.rmd160_hex()); + let mut connections = HashMap::new(); + connections.insert(db_id, Arc::new(Mutex::new(connection))); + + Ok(Self { + connections: Arc::new(Mutex::new(connections)), + ctx: ctx.clone(), + }) + })); Ok(res) } - pub fn init(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.ctx.dbdir(db_id).join("KOMODEFI.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let connection = try_s!(Connection::open(sqlite_file_path)); - - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let mut connections = self.connections.lock().unwrap(); - connections.insert(db_id, Arc::new(Mutex::new(connection))); - - Ok(()) - } - - pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { + pub fn connection_opt(&self, db_id: Option<&str>) -> Option>> { let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); if let Ok(connections) = self.connections.lock() { return connections.get(&db_id).cloned(); @@ -88,27 +89,25 @@ pub struct SharedSqlConnectionCtx { } impl SharedSqlConnectionCtx { - pub fn from_ctx(ctx: &MmArc) -> Result, String> { - let res = try_s!(from_ctx(&ctx.shared_sqlite_connection_ctx, move || Ok(Self { - connections: Arc::new(Mutex::new(HashMap::new())), - ctx: ctx.clone() - }))); + pub fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + let res = try_s!(from_ctx(&ctx.shared_sqlite_connection_ctx, move || { + let sqlite_file_path = ctx.dbdir(db_id).join("MM2-shared.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + let connection = try_s!(Connection::open(sqlite_file_path)); + + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| ctx.rmd160_hex()); + let mut connections = HashMap::new(); + connections.insert(db_id, Arc::new(Mutex::new(connection))); + + Ok(Self { + connections: 
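The init/from_ctx bodies in this module all resolve the database file through ctx.dbdir(db_id).join(...). A tiny sketch of that kind of per-account path resolution, with a placeholder default id standing in for ctx.rmd160_hex() and the directory layout assumed for illustration:

    use std::path::PathBuf;

    /// Sketch of a per-account database path scheme:
    /// <db root>/<account id>/<db file>, where the account id defaults to a
    /// fixed value (ctx.rmd160_hex() in the real code).
    fn account_db_path(db_root: &str, db_id: Option<&str>, file: &str) -> PathBuf {
        let default_id = "default_rmd160_hex"; // placeholder for ctx.rmd160_hex()
        PathBuf::from(db_root).join(db_id.unwrap_or(default_id)).join(file)
    }

    fn main() {
        let path = account_db_path("DB", Some("3710dd4c868c9c1d"), "KOMODEFI.db");
        println!("{}", path.display());
        assert!(path.ends_with("KOMODEFI.db"));
    }
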
Arc::new(Mutex::new(connections)), + ctx: ctx.clone(), + }) + })); Ok(res) } - pub fn init(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.ctx.dbdir(db_id).join("MM2-shared.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let connection = try_s!(Connection::open(sqlite_file_path)); - - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let mut connections = self.connections.lock().unwrap(); - connections.insert(db_id, Arc::new(Mutex::new(connection))); - - Ok(()) - } - pub fn connection(&self, db_id: Option<&str>) -> Arc> { let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); let connection = self.connections.lock().unwrap(); diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index eaae73475a..c56d1100ad 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -8,6 +8,7 @@ use db_common::sqlite::rusqlite::{Connection, Error as SqlError, Result as SqlRe use db_common::sqlite::sql_builder::SqlBuilder; use db_common::sqlite::{offset_by_uuid, query_single_row}; use mm2_core::mm_ctx::MmArc; +use mm2_core::sql_ctx::SyncSqlConnectionCtx; use std::convert::TryInto; use uuid::{Error as UuidError, Uuid}; @@ -70,10 +71,13 @@ pub fn insert_new_swap( uuid: &str, started_at: &str, swap_type: u8, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> SqlResult<()> { debug!("Inserting new swap {} to the SQLite database", uuid); - let conn = ctx.sqlite_connection(); + let conn = SyncSqlConnectionCtx::from_ctx(ctx, db_id) + .expect("sqlite_connection is not initialized") + .connection(db_id); + let conn = conn.lock().unwrap(); let params = [my_coin, other_coin, uuid, started_at, &swap_type.to_string()]; conn.execute(INSERT_MY_SWAP, params).map(|_| ()) } @@ -123,7 +127,10 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( );"#; pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)]) -> SqlResult<()> { - let conn = ctx.sqlite_connection(); + let conn = SyncSqlConnectionCtx::from_ctx(ctx, db_id) + .expect("sqlite_connection is not initialized") + .connection(db_id); + let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_SWAP_V2, params).map(|_| ()) } From c27206c0ba4f279ac9f00a4c455bff143635d24a Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 17 Apr 2024 03:46:26 +0100 Subject: [PATCH 037/186] make account_id optional and return None by default --- mm2src/coins/lp_coins.rs | 4 +-- mm2src/coins/nft.rs | 2 +- .../utxo/utxo_builder/utxo_coin_builder.rs | 2 +- mm2src/mm2_main/src/database/my_swaps.rs | 2 +- mm2src/mm2_main/src/lp_ordermatch.rs | 4 +-- mm2src/mm2_main/src/lp_swap.rs | 4 +-- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 12 ++++----- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 23 ++++++----------- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 23 ++++++----------- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 12 ++++----- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 25 ++++++------------- 11 files changed, 45 insertions(+), 68 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 5503177ebb..3d86ded9c6 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3031,7 +3031,7 @@ pub trait MmCoin: /// Loop collecting coin transaction history and saving it to local DB fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; - fn account_db_id(&self) -> Result, String> { Ok(None) } + fn account_db_id(&self) -> Option { None } /// 
Path to tx history file #[cfg(not(target_arch = "wasm32"))] @@ -4211,7 +4211,7 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result Result coin.inner.account_db_id().map_to_mm(UtxoCoinBuildError::Internal)?, + Some(coin) => coin.inner.account_db_id(), None => None, }; let storage_ticker = self.ticker().replace('-', "_"); diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index c56d1100ad..a71455b7e9 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -126,7 +126,7 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( :other_p2p_pub );"#; -pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)]) -> SqlResult<()> { +pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)], db_id: Option<&str>) -> SqlResult<()> { let conn = SyncSqlConnectionCtx::from_ctx(ctx, db_id) .expect("sqlite_connection is not initialized") .connection(db_id); diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 9e5b29e7c7..7136643976 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2974,7 +2974,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO }, }; - let account_db_id = maker_coin.account_db_id().expect("Valid coin pubkey"); + let account_db_id = maker_coin.account_db_id(); if ctx.use_trading_proto_v2() { let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); match (maker_coin, taker_coin) { @@ -3127,7 +3127,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat ); let now = now_sec(); - let account_db_id = taker_coin.account_db_id().expect("Valid taker coin pubkey"); + let account_db_id = taker_coin.account_db_id(); if ctx.use_trading_proto_v2() { let taker_secret = match generate_secret() { Ok(s) => s.into(), diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index b0832c573d..fe59805baf 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -337,7 +337,7 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest return match json::from_slice::(msg) { Ok(mut status) => { status.data.fetch_and_set_usd_prices().await; - let account_id = status.data.account_db_id(&ctx).await.expect("Valid coin pubkey"); + let account_id = status.data.account_db_id(&ctx).await; if let Err(e) = save_stats_swap(&ctx, &status.data, account_id.as_deref()).await { error!("Error saving the swap {} status: {}", status.data.uuid(), e); } @@ -1595,7 +1595,7 @@ pub async fn import_swaps(ctx: MmArc, req: Json) -> Result>, St let mut imported = vec![]; let mut skipped = HashMap::new(); for swap in swaps { - let accound_id = swap.account_db_id(&ctx).await?; + let accound_id = swap.account_db_id(&ctx).await; match swap.save_to_db(&ctx, accound_id.as_deref()).await { Ok(_) => { if let Some(info) = swap.get_my_info() { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 3a46f51c5a..829cb497fb 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -87,7 +87,7 @@ pub fn stats_maker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) } async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSavedEvent) -> Result<(), String> { - let db_id = try_s!(swap.maker_coin.account_db_id()); + let db_id = 
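Patch 037 turns account_db_id into a plain Option<String> with a None default, so call sites drop the try_s! wrapping throughout this patch. A pared-down illustration of such a defaulted trait method, using invented LegacyCoin and HdCoin types:

    /// Coins with no account-specific database inherit None; account-aware
    /// coins override the default. LegacyCoin and HdCoin are invented types.
    trait AccountDb {
        fn account_db_id(&self) -> Option<String> {
            None
        }
    }

    struct LegacyCoin;
    impl AccountDb for LegacyCoin {}

    struct HdCoin {
        pubkey_hash_hex: String,
    }

    impl AccountDb for HdCoin {
        fn account_db_id(&self) -> Option<String> {
            Some(self.pubkey_hash_hex.clone())
        }
    }

    fn main() {
        assert_eq!(LegacyCoin.account_db_id(), None);
        let hd = HdCoin { pubkey_hash_hex: "3710dd4c868c9c1d".to_string() };
        assert_eq!(hd.account_db_id(), Some("3710dd4c868c9c1d".to_string()));
    }
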
swap.maker_coin.account_db_id(); let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Maker(MakerSavedSwap { @@ -594,7 +594,7 @@ impl MakerSwap { NEGOTIATION_TIMEOUT_SEC as f64 / 6., self.p2p_privkey, ); - let db_id = try_s!(self.maker_coin.account_db_id()); + let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiation_reply.take(), @@ -700,7 +700,7 @@ impl MakerSwap { self.p2p_privkey, ); - let db_id = try_s!(self.maker_coin.account_db_id()); + let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.taker_fee.take(), @@ -937,7 +937,7 @@ impl MakerSwap { // wait for 3/5, we need to leave some time space for transaction to be confirmed let wait_duration = (self.r().data.lock_duration * 3) / 5; - let db_id = try_s!(self.maker_coin.account_db_id()); + let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.taker_payment.take(), @@ -1299,7 +1299,7 @@ impl MakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let account_key = try_s!(maker_coin.account_db_id()); + let account_key = maker_coin.account_db_id(); let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), @@ -2095,7 +2095,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { }; } let running_swap = Arc::new(swap); - let account_id = running_swap.maker_coin.account_db_id().expect("Valid maker pubkey"); + let account_id = running_swap.maker_coin.account_db_id(); let weak_ref = Arc::downgrade(&running_swap); let swap_ctx = SwapsContext::from_ctx(&ctx, account_id.as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.taker); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index ad586693f5..d800883dd5 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -153,7 +153,7 @@ impl StateMachineStorage for MakerSwapStorage { #[cfg(not(target_arch = "wasm32"))] async fn store_repr(&mut self, _id: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { let ctx = self.ctx.clone(); - + let db_id = self.db_id.clone(); async_blocking(move || { let sql_params = named_params! 
{ ":my_coin": &repr.maker_coin, @@ -177,7 +177,7 @@ impl StateMachineStorage for MakerSwapStorage { ":taker_coin_nota": repr.conf_settings.taker_coin_nota, ":other_p2p_pub": repr.taker_p2p_pub.to_bytes(), }; - insert_new_swap_v2(&ctx, sql_params)?; + insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) .await @@ -689,11 +689,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx( - &self.ctx, - self.maker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), - ) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let maker_coin_ticker: String = self.maker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -712,11 +709,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx( - &self.ctx, - self.maker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), - ) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let ticker = self.maker_coin.ticker(); if let Some(maker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { maker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); @@ -750,9 +744,8 @@ impl { - // TODO: db_id - let swaps_ctx = - SwapsContext::from_ctx(&self.ctx, None).expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let maker_coin_ticker: String = self.maker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 2e0dfa1dc6..3163fd3ade 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -154,29 +154,22 @@ impl SavedSwap { } } - #[cfg(not(target_arch = "wasm32"))] - pub async fn account_db_id(&self, _ctx: &MmArc) -> Result, String> { - // TODO - Ok(None) - } - - #[cfg(target_arch = "wasm32")] - pub async fn account_db_id(&self, ctx: &MmArc) -> Result, String> { - use coins::lp_coinfind_any; - + pub async fn account_db_id(&self, ctx: &MmArc) -> Option { let coin_ticker = match self { SavedSwap::Maker(swap) => &swap.maker_coin, SavedSwap::Taker(swap) => &swap.taker_coin, }; if let Some(ticker) = coin_ticker { - let coin = lp_coinfind_any(ctx, ticker).await?.map(|c| c.inner); - if let Some(coin) = coin { - return coin.account_db_id(); - } + if let Ok(coin) = coins::lp_coinfind_any(ctx, ticker).await { + let coin = coin.map(|c| c.inner); + if let Some(coin) = coin { + return coin.account_db_id(); + } + }; } - Ok(None) + None } } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 088c48e93d..875d6c7529 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -108,7 +108,7 @@ pub fn stats_taker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) } async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSavedEvent) -> Result<(), String> { - let db_id = try_s!(swap.taker_coin.account_db_id()); + let db_id = swap.taker_coin.account_db_id(); let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, 
Ok(None) => SavedSwap::Taker(TakerSavedSwap { @@ -445,7 +445,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let uuid = swap.uuid.to_string(); let to_broadcast = !(swap.maker_coin.is_privacy() || swap.taker_coin.is_privacy()); let running_swap = Arc::new(swap); - let account_id = running_swap.taker_coin.account_db_id().expect("Valid maker pubkey"); + let account_id = running_swap.taker_coin.account_db_id(); info!("USING COIN PUBKEY: {account_id:?}"); let weak_ref = Arc::downgrade(&running_swap); let swap_ctx = SwapsContext::from_ctx(&ctx, account_id.as_deref()).unwrap(); @@ -1123,7 +1123,7 @@ impl TakerSwap { async fn negotiate(&self) -> Result<(Option, Vec), String> { const NEGOTIATE_TIMEOUT_SEC: u64 = 90; - let db_id = try_s!(self.maker_coin.account_db_id()); + let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiation.take(), @@ -1236,7 +1236,7 @@ impl TakerSwap { NEGOTIATE_TIMEOUT_SEC as f64 / 6., self.p2p_privkey, ); - let db_id = try_s!(self.taker_coin.account_db_id()); + let db_id = self.taker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiated.take(), @@ -1333,7 +1333,7 @@ impl TakerSwap { self.p2p_privkey, ); - let db_id = try_s!(self.maker_coin.account_db_id()); + let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.maker_payment.take(), @@ -1964,7 +1964,7 @@ impl TakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let account_key = try_s!(taker_coin.account_db_id()); + let account_key = taker_coin.account_db_id(); let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index c1720a5e11..44a20af720 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -185,7 +185,7 @@ impl StateMachineStorage for TakerSwapStorage { #[cfg(not(target_arch = "wasm32"))] async fn store_repr(&mut self, _id: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { let ctx = self.ctx.clone(); - + let db_id = self.db_id.clone(); async_blocking(move || { let sql_params = named_params! 
{ ":my_coin": repr.taker_coin, @@ -209,7 +209,7 @@ impl StateMachineStorage for TakerSwapStorage { ":taker_coin_nota": repr.conf_settings.taker_coin_nota, ":other_p2p_pub": repr.maker_p2p_pub.to_bytes(), }; - insert_new_swap_v2(&ctx, sql_params)?; + insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) .await @@ -805,11 +805,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx( - &self.ctx, - self.taker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), - ) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -828,11 +825,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx( - &self.ctx, - self.taker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), - ) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let ticker = self.taker_coin.ticker(); if let Some(taker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { taker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); @@ -860,11 +854,8 @@ impl { - let swaps_ctx = SwapsContext::from_ctx( - &self.ctx, - self.taker_coin.account_db_id().expect("Valid maker pubkey").as_deref(), - ) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) + .expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, From ca13b41a51baf0ab752a183dfdb17498ec6336b1 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 17 Apr 2024 08:52:26 +0100 Subject: [PATCH 038/186] WIP rollback sql_ctx --- .../utxo/utxo_block_header_storage/mod.rs | 20 ++-- mm2src/mm2_core/src/sql_ctx.rs | 91 ++----------------- mm2src/mm2_main/src/database/my_swaps.rs | 18 ++-- 3 files changed, 27 insertions(+), 102 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index f5f7c976f8..6a09bbfe1c 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -1,8 +1,12 @@ -#[cfg(not(target_arch = "wasm32"))] mod sql_block_header_storage; +#[cfg(not(target_arch = "wasm32"))] +mod sql_block_header_storage; + #[cfg(not(target_arch = "wasm32"))] pub use sql_block_header_storage::SqliteBlockHeadersStorage; -#[cfg(target_arch = "wasm32")] mod wasm; +#[cfg(target_arch = "wasm32")] +mod wasm; + #[cfg(target_arch = "wasm32")] pub use wasm::IDBBlockHeadersStorage; @@ -29,15 +33,14 @@ impl BlockHeaderStorage { pub(crate) fn new_from_ctx( ctx: MmArc, ticker: String, - db_id: Option<&str>, + _db_id: Option<&str>, ) -> Result { - use mm2_core::sql_ctx::SyncSqlConnectionCtx; + let sqlite_connection = ctx.sqlite_connection.ok_or(BlockHeaderStorageError::Internal( + "sqlite_connection is not initialized".to_owned(), + ))?; - let conn = SyncSqlConnectionCtx::from_ctx(&ctx, db_id) - .map_err(BlockHeaderStorageError::Internal)? 
- .connection(db_id); Ok(BlockHeaderStorage { - inner: Box::new(SqliteBlockHeadersStorage { ticker, conn }), + inner: Box::new(SqliteBlockHeadersStorage { ticker, conn: sqlite_connection.clone() }), }) } @@ -316,6 +319,7 @@ mod native_tests { const FOR_COIN_GET: &str = "get"; const FOR_COIN_INSERT: &str = "insert"; + #[test] fn test_add_block_headers() { block_on(test_add_block_headers_impl(FOR_COIN_INSERT)) } diff --git a/mm2src/mm2_core/src/sql_ctx.rs b/mm2src/mm2_core/src/sql_ctx.rs index b71de3d9d4..a2693f54e4 100644 --- a/mm2src/mm2_core/src/sql_ctx.rs +++ b/mm2src/mm2_core/src/sql_ctx.rs @@ -1,14 +1,13 @@ -use std::{collections::HashMap, - sync::{Arc, Mutex}}; +use std::{collections::HashMap, sync::Arc}; -use db_common::{async_sql_conn::AsyncConnection, sqlite::rusqlite::Connection}; +use db_common::async_sql_conn::AsyncConnection; use futures::lock::Mutex as AsyncMutex; use gstuff::try_s; use crate::mm_ctx::{from_ctx, log_sqlite_file_open_attempt, MmArc}; pub struct AsyncSqlConnectionCtx { - connections: Arc>>>>, + pub connections: Arc>>>>, ctx: MmArc, } @@ -24,93 +23,21 @@ impl AsyncSqlConnectionCtx { Ok(res) } - pub async fn init(&self, db_id: Option<&str>) -> Result<(), String> { + pub async fn init(&self, db_id: Option<&str>) -> Result>, String> { let sqlite_file_path = self.ctx.dbdir(db_id).join("KOMODEFI.db"); log_sqlite_file_open_attempt(&sqlite_file_path); - let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); + let async_conn = Arc::new(AsyncMutex::new(try_s!(AsyncConnection::open(sqlite_file_path).await))); let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); let mut connections = self.connections.lock().await; - connections.insert(db_id, Arc::new(AsyncMutex::new(async_conn))); + connections.insert(db_id, async_conn.clone()); - Ok(()) - } -} - -#[derive(Clone)] -pub struct SyncSqlConnectionCtx { - connections: Arc>>>>, - ctx: MmArc, -} - -impl SyncSqlConnectionCtx { - pub fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { - let res = try_s!(from_ctx(&ctx.sqlite_connection_ctx, move || { - let sqlite_file_path = ctx.dbdir(db_id).join("KOMODEFI.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let connection = try_s!(Connection::open(sqlite_file_path)); - - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| ctx.rmd160_hex()); - let mut connections = HashMap::new(); - connections.insert(db_id, Arc::new(Mutex::new(connection))); - - Ok(Self { - connections: Arc::new(Mutex::new(connections)), - ctx: ctx.clone(), - }) - })); - - Ok(res) - } - - pub fn connection_opt(&self, db_id: Option<&str>) -> Option>> { - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - if let Ok(connections) = self.connections.lock() { - return connections.get(&db_id).cloned(); - }; - - None - } - - pub fn connection(&self, db_id: Option<&str>) -> Arc> { - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let connection = self.connections.lock().unwrap(); - - connection - .get(&db_id) - .cloned() - .expect("sqlite_connection is not initialized") - } -} - -pub struct SharedSqlConnectionCtx { - connections: Arc>>>>, - ctx: MmArc, -} - -impl SharedSqlConnectionCtx { - pub fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { - let res = try_s!(from_ctx(&ctx.shared_sqlite_connection_ctx, move || { - let sqlite_file_path = ctx.dbdir(db_id).join("MM2-shared.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let connection = 
try_s!(Connection::open(sqlite_file_path)); - - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| ctx.rmd160_hex()); - let mut connections = HashMap::new(); - connections.insert(db_id, Arc::new(Mutex::new(connection))); - - Ok(Self { - connections: Arc::new(Mutex::new(connections)), - ctx: ctx.clone(), - }) - })); - - Ok(res) + Ok(async_conn) } - pub fn connection(&self, db_id: Option<&str>) -> Arc> { + pub async fn connection(&self, db_id: Option<&str>) -> Arc> { let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let connection = self.connections.lock().unwrap(); + let connection = self.connections.lock().await; connection .get(&db_id) diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index a71455b7e9..83748158a8 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -8,7 +8,6 @@ use db_common::sqlite::rusqlite::{Connection, Error as SqlError, Result as SqlRe use db_common::sqlite::sql_builder::SqlBuilder; use db_common::sqlite::{offset_by_uuid, query_single_row}; use mm2_core::mm_ctx::MmArc; -use mm2_core::sql_ctx::SyncSqlConnectionCtx; use std::convert::TryInto; use uuid::{Error as UuidError, Uuid}; @@ -71,13 +70,10 @@ pub fn insert_new_swap( uuid: &str, started_at: &str, swap_type: u8, - db_id: Option<&str>, + _db_id: Option<&str>, ) -> SqlResult<()> { debug!("Inserting new swap {} to the SQLite database", uuid); - let conn = SyncSqlConnectionCtx::from_ctx(ctx, db_id) - .expect("sqlite_connection is not initialized") - .connection(db_id); - let conn = conn.lock().unwrap(); + let conn = ctx.sqlite_connection(); let params = [my_coin, other_coin, uuid, started_at, &swap_type.to_string()]; conn.execute(INSERT_MY_SWAP, params).map(|_| ()) } @@ -126,11 +122,8 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( :other_p2p_pub );"#; -pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)], db_id: Option<&str>) -> SqlResult<()> { - let conn = SyncSqlConnectionCtx::from_ctx(ctx, db_id) - .expect("sqlite_connection is not initialized") - .connection(db_id); - let conn = conn.lock().unwrap(); +pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)], _db_id: Option<&str>) -> SqlResult<()> { + let conn = ctx.sqlite_connection(); conn.execute(INSERT_MY_SWAP_V2, params).map(|_| ()) } @@ -247,7 +240,7 @@ pub fn select_uuids_by_my_swaps_filter( query_builder.limit(paging.limit); query_builder.offset(offset); offset - }, + } None => 0, }; @@ -295,6 +288,7 @@ pub fn update_swap_events(conn: &Connection, uuid: &str, events_json: &str, _db_ } const UPDATE_SWAP_IS_FINISHED_BY_UUID: &str = "UPDATE my_swaps SET is_finished = 1 WHERE uuid = :uuid;"; + pub fn set_swap_is_finished(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> SqlResult<()> { let mut stmt = conn.prepare(UPDATE_SWAP_IS_FINISHED_BY_UUID)?; stmt.execute(&[(":uuid", uuid)]).map(|_| ()) From 3f053bde4d06af5e882479887376cf120f8a2bae Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 17 Apr 2024 08:59:57 +0100 Subject: [PATCH 039/186] WIP rollback sql_ctx --- mm2src/mm2_core/src/mm_ctx.rs | 36 ++++++++++++++-------------------- mm2src/mm2_core/src/sql_ctx.rs | 20 +++++++++++++++++++ 2 files changed, 35 insertions(+), 21 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 4bd29fd6fe..44ffb8c269 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -142,10 +142,6 @@ pub struct MmCtx { pub 
async_sqlite_connection: Constructible>>, #[cfg(not(target_arch = "wasm32"))] pub async_sqlite_connection_ctx: Mutex>>, - #[cfg(not(target_arch = "wasm32"))] - pub shared_sqlite_connection_ctx: Mutex>>, - #[cfg(not(target_arch = "wasm32"))] - pub sqlite_connection_ctx: Mutex>>, } impl MmCtx { @@ -197,10 +193,6 @@ impl MmCtx { async_sqlite_connection: Constructible::default(), #[cfg(not(target_arch = "wasm32"))] async_sqlite_connection_ctx: Mutex::new(None), - #[cfg(not(target_arch = "wasm32"))] - sqlite_connection_ctx: Mutex::new(None), - #[cfg(not(target_arch = "wasm32"))] - shared_sqlite_connection_ctx: Mutex::new(None), } } @@ -234,7 +226,7 @@ impl MmCtx { rpcport ) })? - }, + } None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -249,7 +241,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -465,6 +457,7 @@ pub struct MmArc(pub SharedRc); // after we finish the initial port and replace the C values with the corresponding Rust alternatives. #[allow(clippy::non_send_fields_in_send_ty)] unsafe impl Send for MmArc {} + unsafe impl Sync for MmArc {} impl Clone for MmArc { @@ -483,6 +476,7 @@ pub struct MmWeak(WeakRc); // Same as `MmArc`. #[allow(clippy::non_send_fields_in_send_ty)] unsafe impl Send for MmWeak {} + unsafe impl Sync for MmWeak {} impl MmWeak { @@ -539,7 +533,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. Stop the loop"); break; - }, + } } } }; @@ -573,7 +567,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - }, + } } } } @@ -670,8 +664,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -679,8 +673,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -694,9 +688,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> -where - C: FnOnce() -> Result, - T: 'static + Send + Sync, + where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -789,9 +783,9 @@ pub(super) fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - }, + } Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - }, + } } } diff --git a/mm2src/mm2_core/src/sql_ctx.rs b/mm2src/mm2_core/src/sql_ctx.rs index a2693f54e4..de37620a83 100644 --- a/mm2src/mm2_core/src/sql_ctx.rs +++ b/mm2src/mm2_core/src/sql_ctx.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, sync::Arc}; use db_common::async_sql_conn::AsyncConnection; use futures::lock::Mutex as AsyncMutex; use gstuff::try_s; +use common::log::error; use crate::mm_ctx::{from_ctx, log_sqlite_file_open_attempt, MmArc}; @@ -35,6 +36,16 @@ impl AsyncSqlConnectionCtx { Ok(async_conn) } + pub async fn get_or_init(&self, db_id: Option<&str>) -> Result>, String> { + let db_id_str = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + let connections = self.connections.lock().await; + if let Some(connection) = 
connections.get(&db_id_str) { + return Ok(connection.clone()); + }; + + self.init(db_id) + } + pub async fn connection(&self, db_id: Option<&str>) -> Arc> { let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); let connection = self.connections.lock().await; @@ -44,4 +55,13 @@ impl AsyncSqlConnectionCtx { .cloned() .expect("sqlite_connection is not initialized") } + + pub async fn close_connectionsn(&self, ctx: &MmArc) { + let mut connections = self.connections.lock().await; + for connection in connections.values_mut() { + if let Err(err) = connection.lock().await.close() { + error!("Error stopping AsyncConnection: {}", e); + }; + } + } } From f0e801607e130a00749869b73738d37c8a2937fb Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 17 Apr 2024 10:23:19 +0100 Subject: [PATCH 040/186] rollback sql_ctx --- mm2src/mm2_core/src/lib.rs | 1 - mm2src/mm2_core/src/sql_ctx.rs | 67 ---------------------------------- 2 files changed, 68 deletions(-) delete mode 100644 mm2src/mm2_core/src/sql_ctx.rs diff --git a/mm2src/mm2_core/src/lib.rs b/mm2src/mm2_core/src/lib.rs index 8d20fffd76..3eb5ecc6ae 100644 --- a/mm2src/mm2_core/src/lib.rs +++ b/mm2src/mm2_core/src/lib.rs @@ -3,7 +3,6 @@ use rand::{thread_rng, Rng}; pub mod event_dispatcher; pub mod mm_ctx; -#[cfg(not(target_arch = "wasm32"))] pub mod sql_ctx; #[derive(Clone, Copy, Display, PartialEq)] pub enum DbNamespaceId { diff --git a/mm2src/mm2_core/src/sql_ctx.rs b/mm2src/mm2_core/src/sql_ctx.rs deleted file mode 100644 index de37620a83..0000000000 --- a/mm2src/mm2_core/src/sql_ctx.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::{collections::HashMap, sync::Arc}; - -use db_common::async_sql_conn::AsyncConnection; -use futures::lock::Mutex as AsyncMutex; -use gstuff::try_s; -use common::log::error; - -use crate::mm_ctx::{from_ctx, log_sqlite_file_open_attempt, MmArc}; - -pub struct AsyncSqlConnectionCtx { - pub connections: Arc>>>>, - ctx: MmArc, -} - -impl AsyncSqlConnectionCtx { - pub async fn from_ctx(ctx: &MmArc) -> Result, String> { - let res = try_s!(from_ctx(&ctx.async_sqlite_connection_ctx, move || { - Ok(Self { - connections: Arc::new(AsyncMutex::new(HashMap::new())), - ctx: ctx.clone(), - }) - })); - - Ok(res) - } - - pub async fn init(&self, db_id: Option<&str>) -> Result>, String> { - let sqlite_file_path = self.ctx.dbdir(db_id).join("KOMODEFI.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let async_conn = Arc::new(AsyncMutex::new(try_s!(AsyncConnection::open(sqlite_file_path).await))); - - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let mut connections = self.connections.lock().await; - connections.insert(db_id, async_conn.clone()); - - Ok(async_conn) - } - - pub async fn get_or_init(&self, db_id: Option<&str>) -> Result>, String> { - let db_id_str = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let connections = self.connections.lock().await; - if let Some(connection) = connections.get(&db_id_str) { - return Ok(connection.clone()); - }; - - self.init(db_id) - } - - pub async fn connection(&self, db_id: Option<&str>) -> Arc> { - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let connection = self.connections.lock().await; - - connection - .get(&db_id) - .cloned() - .expect("sqlite_connection is not initialized") - } - - pub async fn close_connectionsn(&self, ctx: &MmArc) { - let mut connections = self.connections.lock().await; - for connection in connections.values_mut() { - if let Err(err) = 
connection.lock().await.close() { - error!("Error stopping AsyncConnection: {}", e); - }; - } - } -} From f8d08cfd0bde2b2e41d3d23b8bfcf9c26eea22b1 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 17 Apr 2024 12:38:29 +0100 Subject: [PATCH 041/186] fix lint --- .../utxo/utxo_block_header_storage/mod.rs | 11 ++++---- mm2src/mm2_core/src/mm_ctx.rs | 26 +++++++++---------- mm2src/mm2_main/src/database/my_swaps.rs | 2 +- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 6a09bbfe1c..4fe1bfa2dc 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -1,11 +1,9 @@ -#[cfg(not(target_arch = "wasm32"))] -mod sql_block_header_storage; +#[cfg(not(target_arch = "wasm32"))] mod sql_block_header_storage; #[cfg(not(target_arch = "wasm32"))] pub use sql_block_header_storage::SqliteBlockHeadersStorage; -#[cfg(target_arch = "wasm32")] -mod wasm; +#[cfg(target_arch = "wasm32")] mod wasm; #[cfg(target_arch = "wasm32")] pub use wasm::IDBBlockHeadersStorage; @@ -40,7 +38,10 @@ impl BlockHeaderStorage { ))?; Ok(BlockHeaderStorage { - inner: Box::new(SqliteBlockHeadersStorage { ticker, conn: sqlite_connection.clone() }), + inner: Box::new(SqliteBlockHeadersStorage { + ticker, + conn: sqlite_connection.clone(), + }), }) } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index e7f72ee317..8768b63002 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -225,7 +225,7 @@ impl MmCtx { rpcport ) })? - } + }, None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -240,7 +240,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -531,7 +531,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. 
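The sql_ctx draft that patch 040 removes kept one AsyncConnection per account id; as written, its get_or_init called init while still holding the lock on the same connections map, and close_connectionsn logged `e` where the matched binding is `err`. A minimal std-only sketch of the same registry shape, with the get-or-create done under a single lock acquisition and the close loop using the binding it matched (Conn is a stand-in type):

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    /// Stand-in connection type with a fallible close(), mirroring the shape of
    /// the AsyncConnection held by the removed sql_ctx registry.
    struct Conn;

    impl Conn {
        fn close(&mut self) -> Result<(), String> {
            Ok(())
        }
    }

    struct Registry {
        connections: Mutex<HashMap<String, Arc<Mutex<Conn>>>>,
    }

    impl Registry {
        /// Get-or-create under a single lock acquisition: taking the map lock
        /// once and using entry() avoids re-locking the map from inside the
        /// "not found" branch.
        fn get_or_init(&self, db_id: &str) -> Arc<Mutex<Conn>> {
            let mut map = self.connections.lock().unwrap();
            map.entry(db_id.to_string())
                .or_insert_with(|| Arc::new(Mutex::new(Conn)))
                .clone()
        }

        /// Close every connection, reporting failures with the binding that was
        /// actually matched.
        fn close_connections(&self) {
            let map = self.connections.lock().unwrap();
            for conn in map.values() {
                if let Err(err) = conn.lock().unwrap().close() {
                    eprintln!("Error stopping connection: {}", err);
                }
            }
        }
    }

    fn main() {
        let registry = Registry { connections: Mutex::new(HashMap::new()) };
        let _conn = registry.get_or_init("account_a");
        registry.close_connections();
    }
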
Stop the loop"); break; - } + }, } } }; @@ -565,7 +565,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - } + }, } } } @@ -662,8 +662,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -671,8 +671,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -686,9 +686,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> - where - C: FnOnce() -> Result, - T: 'static + Send + Sync, +where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -781,9 +781,9 @@ pub(super) fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - } + }, Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - } + }, } } diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 83748158a8..fcae009af4 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -240,7 +240,7 @@ pub fn select_uuids_by_my_swaps_filter( query_builder.limit(paging.limit); query_builder.offset(offset); offset - } + }, None => 0, }; From 2d9623245d909b8f5f896cb50a20b95a24e79b37 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 17 Apr 2024 19:45:55 +0100 Subject: [PATCH 042/186] find_unique_nft_account_ids --- mm2src/coins/nft.rs | 159 ++++++++++++++++---------------- mm2src/coins/nft/nft_structs.rs | 29 ++++-- 2 files changed, 100 insertions(+), 88 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 0d725370b5..d74061bfd5 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -6,12 +6,13 @@ pub(crate) mod nft_errors; pub mod nft_structs; pub(crate) mod storage; -#[cfg(any(test, target_arch = "wasm32"))] mod nft_tests; +#[cfg(any(test, target_arch = "wasm32"))] +mod nft_tests; use crate::{coin_conf, get_my_address, lp_coinfind_or_err, CoinsContext, MarketCoinOps, MmCoinEnum, MmCoinStruct, MyAddressReq, WithdrawError}; use nft_errors::{GetNftInfoError, UpdateNftError}; -use nft_structs::{Chain, ContractType, ConvertChain, Nft, NftFromMoralis, NftList, NftListReq, NftMetadataReq, +use nft_structs::{Chain, ContractType, ConvertChain, Nft, NftFromMoralis, NftListReq, NftMetadataReq, NftTransferHistory, NftTransferHistoryFromMoralis, NftTransfersReq, NftsTransferHistoryList, TransactionNftDetails, UpdateNftReq, WithdrawNftReq}; @@ -19,9 +20,7 @@ use crate::eth::{eth_addr_to_hex, get_eth_address, withdraw_erc1155, withdraw_er EthTxFeeDetails}; use crate::nft::nft_errors::{ClearNftDbError, MetaFromUrlError, ProtectFromSpamError, TransferConfirmationsError, UpdateSpamPhishingError}; -use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, NftCtx, NftInfo, - NftTransferCommon, PhishingDomainReq, PhishingDomainRes, RefreshMetadataReq, - SpamContractReq, SpamContractRes, TransferMeta, TransferStatus, UriMeta}; +use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, 
NftCtx, NftInfo, NftLists, NftTransferCommon, PhishingDomainReq, PhishingDomainRes, RefreshMetadataReq, SpamContractReq, SpamContractRes, TransferMeta, TransferStatus, UriMeta}; use crate::nft::storage::{NftListStorageOps, NftTransferHistoryStorageOps}; use common::parse_rfc3339_to_timestamp; use crypto::StandardHDCoinAddress; @@ -79,29 +78,34 @@ pub type WithdrawNftResult = Result MmResult { - // TODO: db_id - let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; +pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult, GetNftInfoError> { + let db_ids = find_unique_nft_account_ids_active(&ctx).await.map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; - for chain in req.chains.iter() { - if !NftListStorageOps::is_initialized(&storage, chain).await? { - NftListStorageOps::init(&storage, chain).await?; + let mut nft_lists = vec![]; + for db_id in db_ids { + let nft_ctx = NftCtx::from_ctx(&ctx, Some(&db_id)).map_to_mm(GetNftInfoError::Internal)?; + + let storage = nft_ctx.lock_db().await?; + for chain in req.chains.iter() { + if !NftListStorageOps::is_initialized(&storage, chain).await? { + NftListStorageOps::init(&storage, chain).await?; + } } - } - let mut nft_list = storage - .get_nft_list(req.chains, req.max, req.limit, req.page_number, req.filters) - .await?; + let mut nft_list = storage + .get_nft_list(req.chains.clone(), req.max, req.limit, req.page_number, req.filters) + .await?; - if req.protect_from_spam { - for nft in &mut nft_list.nfts { - protect_from_nft_spam_links(nft, true)?; + if req.protect_from_spam { + for nft in &mut nft_list.nfts { + protect_from_nft_spam_links(nft, true)?; + } } + + nft_lists.push(NftLists { nft_list, pubkey: db_id }); } - Ok(nft_list) + Ok(nft_lists) } /// Retrieves detailed metadata for a specified NFT. @@ -193,7 +197,7 @@ async fn process_transfers_confirmations( MmCoinEnum::EthCoin(eth_coin) => { let current_block = current_block_impl(eth_coin).await?; Ok((ticker, current_block)) - }, + } _ => MmError::err(TransferConfirmationsError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }), @@ -242,8 +246,8 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft _ => { return MmError::err(UpdateNftError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), - }) - }, + }); + } }; let nft_transfers = get_moralis_nft_transfers(&ctx, chain, from_block, &req.url, eth_coin).await?; storage.add_transfers_to_history(*chain, nft_transfers).await?; @@ -258,7 +262,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - }, + } Err(_) => { // if there is an error, then NFT LIST table doesnt exist, so we need to cache nft list from moralis. 
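Patch 042 has get_nft_list collect the unique account ids first (a HashSet inside find_unique_nft_account_ids) and then build one NftLists entry per pubkey. A reduced model of that grouping; NftListForAccount is an invented stand-in for the real NftLists response:

    use std::collections::HashSet;

    /// Collect unique account ids, then build one result entry per id so the
    /// response can be grouped by pubkey.
    #[derive(Debug)]
    struct NftListForAccount {
        pubkey: String,
        nft_count: usize,
    }

    fn unique_account_ids(ids: impl IntoIterator<Item = String>) -> HashSet<String> {
        // A HashSet collapses duplicates, as in find_unique_nft_account_ids.
        ids.into_iter().collect()
    }

    fn main() {
        let ids = unique_account_ids(vec![
            "acc1".to_string(),
            "acc2".to_string(),
            "acc1".to_string(), // duplicate account id is collapsed
        ]);

        let lists: Vec<NftListForAccount> = ids
            .into_iter()
            .map(|pubkey| NftListForAccount { pubkey, nft_count: 0 })
            .collect();

        assert_eq!(lists.len(), 2);
        println!("{:?}", lists);
    }
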
NftListStorageOps::init(&storage, chain).await?; @@ -268,7 +272,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - }, + } }; let scanned_block = storage @@ -293,7 +297,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft &req.url, &req.url_antispam, ) - .await?; + .await?; update_nft_global_in_coins_ctx(&ctx, &storage, *chain).await?; update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; update_spam(&storage, *chain, &req.url_antispam).await?; @@ -307,17 +311,17 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft /// This function uses the up-to-date NFT list for a given chain and updates the /// corresponding global NFT information in the coins context. async fn update_nft_global_in_coins_ctx(ctx: &MmArc, storage: &T, chain: Chain) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let coins_ctx = CoinsContext::from_ctx(ctx).map_to_mm(UpdateNftError::Internal)?; let mut coins = coins_ctx.coins.lock().await; let ticker = chain.to_nft_ticker(); if let Some(MmCoinStruct { - inner: MmCoinEnum::EthCoin(nft_global), - .. - }) = coins.get_mut(ticker) + inner: MmCoinEnum::EthCoin(nft_global), + .. + }) = coins.get_mut(ticker) { let nft_list = storage.get_nft_list(vec![chain], true, 1, None, None).await?; update_nft_infos(nft_global, nft_list.nfts).await; @@ -361,8 +365,8 @@ async fn update_nft_infos(nft_global: &mut EthCoin, nft_list: Vec) { /// `update_spam` function updates spam contracts info in NFT list and NFT transfers. 
async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addresses = storage.get_token_addresses(chain).await?; if !token_addresses.is_empty() { @@ -388,8 +392,8 @@ where } async fn update_phishing(storage: &T, chain: &Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_domains = storage.get_domains(chain).await?; let nft_domains = storage.get_animation_external_domains(chain).await?; @@ -478,7 +482,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu &req.url, &req.url_antispam, ) - .await + .await { Ok(moralis_meta) => moralis_meta, Err(_) => { @@ -489,7 +493,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu .update_transfer_spam_by_token_address(&req.chain, token_address_str.clone(), true) .await?; return Ok(()); - }, + } }; let mut nft_db = storage .get_nft(&req.chain, token_address_str.clone(), req.token_id.clone()) @@ -505,7 +509,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu moralis_meta.common.metadata.as_deref(), &req.url_antispam, ) - .await; + .await; // Gather domains for phishing checks let domains = gather_domains(&token_domain, &uri_meta); nft_db.common.collection_name = moralis_meta.common.collection_name; @@ -533,8 +537,8 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu /// The `update_transfer_meta_using_nft` function updates the transfer metadata associated with the given NFT. /// If metadata info contains potential spam links, function sets `possible_spam` true. 
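The update_* helpers reformatted here stay generic over the storage backend through a where bound on the storage traits. A toy version of that pattern with a made-up Storage trait and in-memory backend, only to show how the concrete store is chosen by the caller:

    /// The helper stays backend-agnostic; the caller picks the concrete store
    /// (SQLite or IndexedDB in the real code). Storage and InMemoryStorage are
    /// invented for the example.
    trait Storage {
        fn token_addresses(&self) -> Vec<String>;
    }

    struct InMemoryStorage(Vec<String>);

    impl Storage for InMemoryStorage {
        fn token_addresses(&self) -> Vec<String> {
            self.0.clone()
        }
    }

    fn update_spam<T>(storage: &T) -> usize
    where
        T: Storage,
    {
        // The real helper sends the addresses to an anti-spam service; here we
        // only count them.
        storage.token_addresses().len()
    }

    fn main() {
        let storage = InMemoryStorage(vec!["0xabc".to_string(), "0xdef".to_string()]);
        assert_eq!(update_spam(&storage), 2);
    }
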
async fn update_transfer_meta_using_nft(storage: &T, chain: &Chain, nft: &mut Nft) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_meta = TransferMeta::from(nft.clone()); storage @@ -568,8 +572,8 @@ async fn refresh_possible_spam( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let address_hex = eth_addr_to_hex(&nft_db.common.token_address); let spam_res = send_spam_request(chain, url_antispam, address_hex.clone()).await?; @@ -593,8 +597,8 @@ async fn refresh_possible_phishing( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { if !domains.is_empty() { let domain_list = domains.into_iter().collect::>().join(","); @@ -846,9 +850,9 @@ async fn get_fee_details(eth_coin: &EthCoin, transaction_hash: &str) -> Option None, } } @@ -1006,11 +1010,11 @@ async fn handle_nft_transfer handle_send_erc721(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc721) => { handle_receive_erc721(storage, chain, transfer, url, url_antispam, my_address).await - }, + } (TransferStatus::Send, ContractType::Erc1155) => handle_send_erc1155(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc1155) => { handle_receive_erc1155(storage, chain, transfer, url, url_antispam, my_address).await - }, + } } } @@ -1067,7 +1071,7 @@ async fn handle_receive_erc721 { let mut nft = match get_moralis_metadata( token_address_str.clone(), @@ -1076,7 +1080,7 @@ async fn handle_receive_erc721 { // sometimes moralis updates Get All NFTs (which also affects Get Metadata) later @@ -1085,16 +1089,16 @@ async fn handle_receive_erc721 { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? - }, + } }; storage .add_nfts_to_list(*chain, vec![nft.clone()], transfer.block_number) .await?; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; - }, + } } Ok(()) } @@ -1117,19 +1121,19 @@ async fn handle_send_erc1155 { nft_db.common.amount -= transfer.common.amount; storage .update_nft_amount(chain, nft_db.clone(), transfer.block_number) .await?; - }, + } Ordering::Less => { return MmError::err(UpdateNftError::InsufficientAmountInCache { amount_list: nft_db.common.amount.to_string(), amount_history: transfer.common.amount.to_string(), }); - }, + } } Ok(()) } @@ -1159,7 +1163,7 @@ async fn handle_receive_erc1155 { let nft = match get_moralis_metadata( @@ -1169,20 +1173,20 @@ async fn handle_receive_erc1155 { create_nft_from_moralis_metadata(moralis_meta, &transfer, my_address, chain, url_antispam).await? - }, + } Err(_) => { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? 
- }, + } }; storage .add_nfts_to_list(*chain, [nft.clone()], transfer.block_number) .await?; nft - }, + } }; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; Ok(()) @@ -1202,7 +1206,7 @@ async fn create_nft_from_moralis_metadata( moralis_meta.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let nft = Nft { common: NftCommon { token_address: moralis_meta.common.token_address, @@ -1275,8 +1279,8 @@ async fn cache_nfts_from_moralis(storage: &T, chain: &Chain, nfts: Vec) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { for mut nft in nfts.into_iter() { update_transfer_meta_using_nft(storage, chain, &mut nft).await?; @@ -1291,8 +1295,8 @@ async fn update_transfers_with_empty_meta( url: &Url, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addr_id = storage.get_transfers_with_empty_meta(*chain).await?; for addr_id_pair in token_addr_id.into_iter() { @@ -1303,7 +1307,7 @@ where url, url_antispam, ) - .await + .await { Ok(nft_meta) => nft_meta, Err(_) => { @@ -1314,7 +1318,7 @@ where .update_transfer_spam_by_token_address(chain, addr_id_pair.token_address, true) .await?; continue; - }, + } }; update_transfer_meta_using_nft(storage, chain, &mut nft_meta).await?; } @@ -1339,7 +1343,7 @@ fn process_text_for_spam_link(text: &mut Option, redact: bool) -> Result *text = Some("URL redacted for user protection".to_string()); } Ok(true) - }, + } _ => Ok(false), } } @@ -1418,7 +1422,7 @@ fn process_metadata_field( ); } Ok(true) - }, + } _ => Ok(false), } } @@ -1435,7 +1439,7 @@ async fn build_nft_from_moralis( nft_moralis.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let token_domain = get_domain_from_url(token_uri.as_deref()); Nft { common: NftCommon { @@ -1503,8 +1507,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN } async fn clear_data_for_chain(storage: &T, chain: &Chain) -> MmResult<(), ClearNftDbError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let (is_nft_list_init, is_history_init) = ( NftListStorageOps::is_initialized(storage, chain).await?, @@ -1532,11 +1536,10 @@ fn construct_moralis_uri_for_nft(base_url: &Url, address: &str, chain: &Chain) - Ok(uri) } -pub async fn find_unique_nft_account_ids_any(ctx: &MmArc) -> Result, String> { - find_unique_nft_account_ids(ctx, false).await +pub async fn find_unique_nft_account_ids_active(ctx: &MmArc) -> Result, String> { + find_unique_nft_account_ids(ctx, true).await } -// TODO: remove early return and cfg #[allow(unused)] async fn find_unique_nft_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { // Using a HashSet to ensure uniqueness efficiently @@ -1551,7 +1554,7 @@ async fn find_unique_nft_account_ids(ctx: &MmArc, active_only: bool) -> Result Deserialize<'de> for Chain { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; s.parse().map_err(de::Error::custom) @@ -395,9 +395,9 @@ pub(crate) struct NftFromMoralis { pub(crate) struct SerdeStringWrap(pub(crate) T); impl<'de, T> Deserialize<'de> for SerdeStringWrap -where - T: std::str::FromStr, - T::Err: std::fmt::Debug + std::fmt::Display, + where + T: std::str::FromStr, + 
T::Err: std::fmt::Debug + std::fmt::Display, { fn deserialize>(deserializer: D) -> Result { let value: &str = Deserialize::deserialize(deserializer)?; @@ -416,6 +416,12 @@ impl std::ops::Deref for SerdeStringWrap { /// Represents a detailed list of NFTs, including the total number of NFTs and the number of skipped NFTs. /// It is used as response of `get_nft_list` if it is successful. +#[derive(Debug, Serialize)] +pub struct NftLists { + pub(crate) nft_list: NftList, + pub(crate) pubkey: String, +} + #[derive(Debug, Serialize)] pub struct NftList { pub(crate) nfts: Vec, @@ -718,6 +724,7 @@ pub(crate) struct NftCtx { pub(crate) nft_cache_db: SharedDb, #[cfg(not(target_arch = "wasm32"))] pub(crate) nft_cache_db: Arc>, + _db_id: Option, } impl NftCtx { @@ -725,13 +732,14 @@ impl NftCtx { /// /// If an `NftCtx` instance doesn't already exist in the MM context, it gets created and cached for subsequent use. #[cfg(not(target_arch = "wasm32"))] - pub(crate) fn from_ctx(ctx: &MmArc, _db_id: Option<&str>) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { let async_sqlite_connection = ctx .async_sqlite_connection .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { nft_cache_db: async_sqlite_connection.clone(), + _db_id: db_id.map(|e|e.to_string()) }) }))) } @@ -741,6 +749,7 @@ impl NftCtx { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { Ok(NftCtx { nft_cache_db: ConstructibleDb::new(ctx, db_id).into_shared(), + _db_id: db_id.map(|e|e.to_string()) }) }))) } @@ -787,16 +796,16 @@ pub(crate) struct PhishingDomainRes { } fn serialize_token_id(token_id: &BigUint, serializer: S) -> Result -where - S: Serializer, + where + S: Serializer, { let token_id_str = token_id.to_string(); serializer.serialize_str(&token_id_str) } fn deserialize_token_id<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; BigUint::from_str(&s).map_err(serde::de::Error::custom) From d2152f857f4b86e88755b6c8b9ad56c4d4e64ba4 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 18 Apr 2024 07:34:03 +0100 Subject: [PATCH 043/186] improve find_unique_nft_account_ids_active and update get_nft_list to fetch for only active chains concurrently --- mm2src/coins/nft.rs | 217 +++++++++++--------- mm2src/coins/nft/nft_structs.rs | 24 +-- mm2src/coins/nft/nft_tests.rs | 1 + mm2src/coins/nft/storage/db_test_helpers.rs | 8 +- 4 files changed, 143 insertions(+), 107 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index d74061bfd5..e361cb1562 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -6,8 +6,7 @@ pub(crate) mod nft_errors; pub mod nft_structs; pub(crate) mod storage; -#[cfg(any(test, target_arch = "wasm32"))] -mod nft_tests; +#[cfg(any(test, target_arch = "wasm32"))] mod nft_tests; use crate::{coin_conf, get_my_address, lp_coinfind_or_err, CoinsContext, MarketCoinOps, MmCoinEnum, MmCoinStruct, MyAddressReq, WithdrawError}; @@ -20,13 +19,16 @@ use crate::eth::{eth_addr_to_hex, get_eth_address, withdraw_erc1155, withdraw_er EthTxFeeDetails}; use crate::nft::nft_errors::{ClearNftDbError, MetaFromUrlError, ProtectFromSpamError, TransferConfirmationsError, UpdateSpamPhishingError}; -use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, NftCtx, NftInfo, NftLists, NftTransferCommon, PhishingDomainReq, PhishingDomainRes, RefreshMetadataReq, SpamContractReq, 
SpamContractRes, TransferMeta, TransferStatus, UriMeta}; +use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, NftCtx, NftInfo, + NftLists, NftTransferCommon, PhishingDomainReq, PhishingDomainRes, RefreshMetadataReq, + SpamContractReq, SpamContractRes, TransferMeta, TransferStatus, UriMeta}; use crate::nft::storage::{NftListStorageOps, NftTransferHistoryStorageOps}; use common::parse_rfc3339_to_timestamp; use crypto::StandardHDCoinAddress; use ethereum_types::{Address, H256}; use futures::compat::Future01CompatExt; use futures::future::try_join_all; +use futures_util::future::join_all; use mm2_err_handle::map_to_mm::MapToMmResult; use mm2_net::transport::send_post_request_to_uri; use mm2_number::BigUint; @@ -35,6 +37,8 @@ use serde::Deserialize; use serde_json::Value as Json; use std::cmp::Ordering; use std::collections::{HashMap, HashSet}; +use std::future::Future; +use std::pin::Pin; use std::str::FromStr; use web3::types::TransactionId; @@ -79,30 +83,63 @@ pub type WithdrawNftResult = Result MmResult, GetNftInfoError> { - let db_ids = find_unique_nft_account_ids_active(&ctx).await.map_to_mm(GetNftInfoError::Internal)?; + let db_ids = find_unique_nft_account_ids(&ctx, req.chains.clone()) + .await + .map_to_mm(GetNftInfoError::Internal)?; + + let get_nft_for_id = + |val: (String, Vec)| -> Pin> + Send>> { + let ctx_clone = ctx.clone(); + let req = req.clone(); + + let res = async move { + let nft_ctx = NftCtx::from_ctx(&ctx_clone, Some(&val.0)).map_to_mm(GetNftInfoError::Internal)?; + + let chains = req + .chains + .clone() + .into_iter() + .filter(|c| val.1.contains(&c)) + .collect::>(); + let storage = nft_ctx.lock_db().await?; + for chain in req.chains.iter() { + if !NftListStorageOps::is_initialized(&storage, chain).await? { + NftListStorageOps::init(&storage, chain).await?; + } + } - let mut nft_lists = vec![]; - for db_id in db_ids { - let nft_ctx = NftCtx::from_ctx(&ctx, Some(&db_id)).map_to_mm(GetNftInfoError::Internal)?; + let mut nft_list = storage + .get_nft_list(chains, req.max, req.limit, req.page_number, req.filters) + .await?; - let storage = nft_ctx.lock_db().await?; - for chain in req.chains.iter() { - if !NftListStorageOps::is_initialized(&storage, chain).await? 
{ - NftListStorageOps::init(&storage, chain).await?; - } - } + if req.protect_from_spam { + for nft in &mut nft_list.nfts { + protect_from_nft_spam_links(nft, true)?; + } + } - let mut nft_list = storage - .get_nft_list(req.chains.clone(), req.max, req.limit, req.page_number, req.filters) - .await?; + Ok(NftLists { + nft_list, + pubkey: val.0, + }) + }; - if req.protect_from_spam { - for nft in &mut nft_list.nfts { - protect_from_nft_spam_links(nft, true)?; + Box::pin(res) + }; + let future_list = db_ids + .into_iter() + .filter_map(|re| { + if !re.1.is_empty() { + Some(get_nft_for_id(re)) + } else { + None } - } + }) + .collect::>(); - nft_lists.push(NftLists { nft_list, pubkey: db_id }); + let mut nft_lists = vec![]; + for res in join_all(future_list).await { + nft_lists.push(res?); } Ok(nft_lists) @@ -197,7 +234,7 @@ async fn process_transfers_confirmations( MmCoinEnum::EthCoin(eth_coin) => { let current_block = current_block_impl(eth_coin).await?; Ok((ticker, current_block)) - } + }, _ => MmError::err(TransferConfirmationsError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }), @@ -247,7 +284,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft return MmError::err(UpdateNftError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }); - } + }, }; let nft_transfers = get_moralis_nft_transfers(&ctx, chain, from_block, &req.url, eth_coin).await?; storage.add_transfers_to_history(*chain, nft_transfers).await?; @@ -262,7 +299,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - } + }, Err(_) => { // if there is an error, then NFT LIST table doesnt exist, so we need to cache nft list from moralis. NftListStorageOps::init(&storage, chain).await?; @@ -272,7 +309,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - } + }, }; let scanned_block = storage @@ -297,7 +334,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft &req.url, &req.url_antispam, ) - .await?; + .await?; update_nft_global_in_coins_ctx(&ctx, &storage, *chain).await?; update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; update_spam(&storage, *chain, &req.url_antispam).await?; @@ -311,17 +348,17 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft /// This function uses the up-to-date NFT list for a given chain and updates the /// corresponding global NFT information in the coins context. async fn update_nft_global_in_coins_ctx(ctx: &MmArc, storage: &T, chain: Chain) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let coins_ctx = CoinsContext::from_ctx(ctx).map_to_mm(UpdateNftError::Internal)?; let mut coins = coins_ctx.coins.lock().await; let ticker = chain.to_nft_ticker(); if let Some(MmCoinStruct { - inner: MmCoinEnum::EthCoin(nft_global), - .. - }) = coins.get_mut(ticker) + inner: MmCoinEnum::EthCoin(nft_global), + .. 
+ }) = coins.get_mut(ticker) { let nft_list = storage.get_nft_list(vec![chain], true, 1, None, None).await?; update_nft_infos(nft_global, nft_list.nfts).await; @@ -365,8 +402,8 @@ async fn update_nft_infos(nft_global: &mut EthCoin, nft_list: Vec) { /// `update_spam` function updates spam contracts info in NFT list and NFT transfers. async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addresses = storage.get_token_addresses(chain).await?; if !token_addresses.is_empty() { @@ -392,8 +429,8 @@ async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResu } async fn update_phishing(storage: &T, chain: &Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_domains = storage.get_domains(chain).await?; let nft_domains = storage.get_animation_external_domains(chain).await?; @@ -482,7 +519,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu &req.url, &req.url_antispam, ) - .await + .await { Ok(moralis_meta) => moralis_meta, Err(_) => { @@ -493,7 +530,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu .update_transfer_spam_by_token_address(&req.chain, token_address_str.clone(), true) .await?; return Ok(()); - } + }, }; let mut nft_db = storage .get_nft(&req.chain, token_address_str.clone(), req.token_id.clone()) @@ -509,7 +546,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu moralis_meta.common.metadata.as_deref(), &req.url_antispam, ) - .await; + .await; // Gather domains for phishing checks let domains = gather_domains(&token_domain, &uri_meta); nft_db.common.collection_name = moralis_meta.common.collection_name; @@ -537,8 +574,8 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu /// The `update_transfer_meta_using_nft` function updates the transfer metadata associated with the given NFT. /// If metadata info contains potential spam links, function sets `possible_spam` true. 
async fn update_transfer_meta_using_nft(storage: &T, chain: &Chain, nft: &mut Nft) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_meta = TransferMeta::from(nft.clone()); storage @@ -572,8 +609,8 @@ async fn refresh_possible_spam( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let address_hex = eth_addr_to_hex(&nft_db.common.token_address); let spam_res = send_spam_request(chain, url_antispam, address_hex.clone()).await?; @@ -597,8 +634,8 @@ async fn refresh_possible_phishing( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { if !domains.is_empty() { let domain_list = domains.into_iter().collect::>().join(","); @@ -850,9 +887,9 @@ async fn get_fee_details(eth_coin: &EthCoin, transaction_hash: &str) -> Option None, } } @@ -1010,11 +1047,11 @@ async fn handle_nft_transfer handle_send_erc721(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc721) => { handle_receive_erc721(storage, chain, transfer, url, url_antispam, my_address).await - } + }, (TransferStatus::Send, ContractType::Erc1155) => handle_send_erc1155(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc1155) => { handle_receive_erc1155(storage, chain, transfer, url, url_antispam, my_address).await - } + }, } } @@ -1071,7 +1108,7 @@ async fn handle_receive_erc721 { let mut nft = match get_moralis_metadata( token_address_str.clone(), @@ -1080,7 +1117,7 @@ async fn handle_receive_erc721 { // sometimes moralis updates Get All NFTs (which also affects Get Metadata) later @@ -1089,16 +1126,16 @@ async fn handle_receive_erc721 { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? - } + }, }; storage .add_nfts_to_list(*chain, vec![nft.clone()], transfer.block_number) .await?; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; - } + }, } Ok(()) } @@ -1121,19 +1158,19 @@ async fn handle_send_erc1155 { nft_db.common.amount -= transfer.common.amount; storage .update_nft_amount(chain, nft_db.clone(), transfer.block_number) .await?; - } + }, Ordering::Less => { return MmError::err(UpdateNftError::InsufficientAmountInCache { amount_list: nft_db.common.amount.to_string(), amount_history: transfer.common.amount.to_string(), }); - } + }, } Ok(()) } @@ -1163,7 +1200,7 @@ async fn handle_receive_erc1155 { let nft = match get_moralis_metadata( @@ -1173,20 +1210,20 @@ async fn handle_receive_erc1155 { create_nft_from_moralis_metadata(moralis_meta, &transfer, my_address, chain, url_antispam).await? - } + }, Err(_) => { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? 
- } + }, }; storage .add_nfts_to_list(*chain, [nft.clone()], transfer.block_number) .await?; nft - } + }, }; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; Ok(()) @@ -1206,7 +1243,7 @@ async fn create_nft_from_moralis_metadata( moralis_meta.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let nft = Nft { common: NftCommon { token_address: moralis_meta.common.token_address, @@ -1279,8 +1316,8 @@ async fn cache_nfts_from_moralis(storage: &T, chain: &Chain, nfts: Vec) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { for mut nft in nfts.into_iter() { update_transfer_meta_using_nft(storage, chain, &mut nft).await?; @@ -1295,8 +1332,8 @@ async fn update_transfers_with_empty_meta( url: &Url, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addr_id = storage.get_transfers_with_empty_meta(*chain).await?; for addr_id_pair in token_addr_id.into_iter() { @@ -1307,7 +1344,7 @@ async fn update_transfers_with_empty_meta( url, url_antispam, ) - .await + .await { Ok(nft_meta) => nft_meta, Err(_) => { @@ -1318,7 +1355,7 @@ async fn update_transfers_with_empty_meta( .update_transfer_spam_by_token_address(chain, addr_id_pair.token_address, true) .await?; continue; - } + }, }; update_transfer_meta_using_nft(storage, chain, &mut nft_meta).await?; } @@ -1343,7 +1380,7 @@ fn process_text_for_spam_link(text: &mut Option, redact: bool) -> Result *text = Some("URL redacted for user protection".to_string()); } Ok(true) - } + }, _ => Ok(false), } } @@ -1422,7 +1459,7 @@ fn process_metadata_field( ); } Ok(true) - } + }, _ => Ok(false), } } @@ -1439,7 +1476,7 @@ async fn build_nft_from_moralis( nft_moralis.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let token_domain = get_domain_from_url(token_uri.as_deref()); Nft { common: NftCommon { @@ -1507,8 +1544,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN } async fn clear_data_for_chain(storage: &T, chain: &Chain) -> MmResult<(), ClearNftDbError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let (is_nft_list_init, is_history_init) = ( NftListStorageOps::is_initialized(storage, chain).await?, @@ -1536,32 +1573,28 @@ fn construct_moralis_uri_for_nft(base_url: &Url, address: &str, chain: &Chain) - Ok(uri) } -pub async fn find_unique_nft_account_ids_active(ctx: &MmArc) -> Result, String> { - find_unique_nft_account_ids(ctx, true).await -} - -#[allow(unused)] -async fn find_unique_nft_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { - // Using a HashSet to ensure uniqueness efficiently - let mut account_ids = HashSet::new(); - // Add default wallet pubkey - account_ids.insert(ctx.rmd160_hex()); - +pub async fn find_unique_nft_account_ids( + ctx: &MmArc, + chains: Vec, +) -> Result>, String> { let cctx = try_s!(CoinsContext::from_ctx(ctx)); let coins = cctx.coins.lock().await; let coins = coins.values().collect::>(); - #[cfg(not(target_arch = "wasm32"))] + let mut active_id_chains = HashMap::new(); for coin in coins.iter() { - if let Some(account) = coin.inner.account_db_id() { - if coin.is_available() && active_only { - if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { - account_ids.insert(account); - }; + if coin.is_available() { + // 
Use default if no db_id + let db_id = coin.inner.account_db_id().unwrap_or_else(|| ctx.rmd160_hex()); + let entry = active_id_chains.entry(db_id).or_insert_with(Vec::new); + if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { + if chains.contains(&chain) { + entry.push(chain); + } } } } - common::log::info!("nft account_ids=({account_ids:?})"); - Ok(account_ids) + common::log::info!("nft account_ids=({active_id_chains:>2?})"); + Ok(active_id_chains) } diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index 327c730eb4..9b2342246a 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -36,7 +36,7 @@ cfg_wasm32! { /// /// The request provides options such as pagination, limiting the number of results, /// and applying specific filters to the list. -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone)] pub struct NftListReq { /// List of chains to fetch the NFTs from. pub(crate) chains: Vec, @@ -202,8 +202,8 @@ impl FromStr for Chain { /// This implementation will use `FromStr` to deserialize `Chain`. impl<'de> Deserialize<'de> for Chain { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; s.parse().map_err(de::Error::custom) @@ -395,9 +395,9 @@ pub(crate) struct NftFromMoralis { pub(crate) struct SerdeStringWrap(pub(crate) T); impl<'de, T> Deserialize<'de> for SerdeStringWrap - where - T: std::str::FromStr, - T::Err: std::fmt::Debug + std::fmt::Display, +where + T: std::str::FromStr, + T::Err: std::fmt::Debug + std::fmt::Display, { fn deserialize>(deserializer: D) -> Result { let value: &str = Deserialize::deserialize(deserializer)?; @@ -739,7 +739,7 @@ impl NftCtx { .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { nft_cache_db: async_sqlite_connection.clone(), - _db_id: db_id.map(|e|e.to_string()) + _db_id: db_id.map(|e| e.to_string()), }) }))) } @@ -749,7 +749,7 @@ impl NftCtx { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { Ok(NftCtx { nft_cache_db: ConstructibleDb::new(ctx, db_id).into_shared(), - _db_id: db_id.map(|e|e.to_string()) + _db_id: db_id.map(|e| e.to_string()), }) }))) } @@ -796,16 +796,16 @@ pub(crate) struct PhishingDomainRes { } fn serialize_token_id(token_id: &BigUint, serializer: S) -> Result - where - S: Serializer, +where + S: Serializer, { let token_id_str = token_id.to_string(); serializer.serialize_str(&token_id_str) } fn deserialize_token_id<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, +where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; BigUint::from_str(&s).map_err(serde::de::Error::custom) diff --git a/mm2src/coins/nft/nft_tests.rs b/mm2src/coins/nft/nft_tests.rs index f0dd57603c..926932ec3f 100644 --- a/mm2src/coins/nft/nft_tests.rs +++ b/mm2src/coins/nft/nft_tests.rs @@ -199,6 +199,7 @@ cross_test!(test_nft_list, { .get_nft_list(vec![chain], false, 1, Some(NonZeroUsize::new(3).unwrap()), None) .await .unwrap(); + assert_eq!(nft_list.nfts.len(), 1); let nft = nft_list.nfts.get(0).unwrap(); assert_eq!(nft.block_number, 28056721); diff --git a/mm2src/coins/nft/storage/db_test_helpers.rs b/mm2src/coins/nft/storage/db_test_helpers.rs index 75c7b248c2..e188f66c8f 100644 --- a/mm2src/coins/nft/storage/db_test_helpers.rs +++ b/mm2src/coins/nft/storage/db_test_helpers.rs @@ -355,8 +355,10 @@ pub(crate) fn nft_transfer_history() -> Vec { pub(crate) async fn get_nft_ctx(_chain: &Chain) -> Arc { #[cfg(not(target_arch = 
"wasm32"))] - let ctx = mm_ctx_with_custom_async_db().await; + let ctx = mm_ctx_with_custom_async_db().await; #[cfg(target_arch = "wasm32")] - let ctx = mm_ctx_with_custom_db(); - NftCtx::from_ctx(&ctx, None).unwrap() + let ctx = mm_ctx_with_custom_db(); + let nft_ctx = NftCtx::from_ctx(&ctx, None).unwrap(); + + nft_ctx } From 04d2896d08d6cd95e4dc74e906f5ce820e367331 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 18 Apr 2024 08:06:37 +0100 Subject: [PATCH 044/186] update get_nft_transfers to fetch transfers concurrently for multi db_id --- mm2src/coins/nft.rs | 197 ++++++++++++++++++-------------- mm2src/coins/nft/nft_structs.rs | 27 +++-- 2 files changed, 131 insertions(+), 93 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index e361cb1562..5673274aae 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -6,7 +6,8 @@ pub(crate) mod nft_errors; pub mod nft_structs; pub(crate) mod storage; -#[cfg(any(test, target_arch = "wasm32"))] mod nft_tests; +#[cfg(any(test, target_arch = "wasm32"))] +mod nft_tests; use crate::{coin_conf, get_my_address, lp_coinfind_or_err, CoinsContext, MarketCoinOps, MmCoinEnum, MmCoinStruct, MyAddressReq, WithdrawError}; @@ -19,9 +20,7 @@ use crate::eth::{eth_addr_to_hex, get_eth_address, withdraw_erc1155, withdraw_er EthTxFeeDetails}; use crate::nft::nft_errors::{ClearNftDbError, MetaFromUrlError, ProtectFromSpamError, TransferConfirmationsError, UpdateSpamPhishingError}; -use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, NftCtx, NftInfo, - NftLists, NftTransferCommon, PhishingDomainReq, PhishingDomainRes, RefreshMetadataReq, - SpamContractReq, SpamContractRes, TransferMeta, TransferStatus, UriMeta}; +use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, NftCtx, NftInfo, NftLists, NftsTransferHistoryLists, NftTransferCommon, PhishingDomainReq, PhishingDomainRes, RefreshMetadataReq, SpamContractReq, SpamContractRes, TransferMeta, TransferStatus, UriMeta}; use crate::nft::storage::{NftListStorageOps, NftTransferHistoryStorageOps}; use common::parse_rfc3339_to_timestamp; use crypto::StandardHDCoinAddress; @@ -87,19 +86,19 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult .await .map_to_mm(GetNftInfoError::Internal)?; - let get_nft_for_id = - |val: (String, Vec)| -> Pin> + Send>> { + let get_nfts = + |id: String, chains: Vec| -> Pin> + Send>> { let ctx_clone = ctx.clone(); let req = req.clone(); let res = async move { - let nft_ctx = NftCtx::from_ctx(&ctx_clone, Some(&val.0)).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx_clone, Some(&id)).map_to_mm(GetNftInfoError::Internal)?; let chains = req .chains .clone() .into_iter() - .filter(|c| val.1.contains(&c)) + .filter(|c| chains.contains(c)) .collect::>(); let storage = nft_ctx.lock_db().await?; for chain in req.chains.iter() { @@ -120,17 +119,18 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult Ok(NftLists { nft_list, - pubkey: val.0, + pubkey: id, }) }; Box::pin(res) }; + let future_list = db_ids .into_iter() - .filter_map(|re| { - if !re.1.is_empty() { - Some(get_nft_for_id(re)) + .filter_map(|(id, chains)| { + if !chains.is_empty() { + Some(get_nfts(id, chains)) } else { None } @@ -192,27 +192,58 @@ pub async fn get_nft_metadata(ctx: MmArc, req: NftMetadataReq) -> MmResult MmResult { - // TODO: db_id - let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, 
db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; +pub async fn get_nft_transfers(ctx: MmArc, req: NftTransfersReq) -> MmResult, GetNftInfoError> { + let db_ids = find_unique_nft_account_ids(&ctx, req.chains.clone()) + .await + .map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; - for chain in req.chains.iter() { - if !NftTransferHistoryStorageOps::is_initialized(&storage, chain).await? { - NftTransferHistoryStorageOps::init(&storage, chain).await?; - } - } - let mut transfer_history_list = storage - .get_transfer_history(req.chains.clone(), req.max, req.limit, req.page_number, req.filters) - .await?; - if req.protect_from_spam { - for transfer in &mut transfer_history_list.transfer_history { - protect_from_history_spam_links(transfer, true)?; - } + let get_nft_transfers = + |db_id: String, chains: Vec| -> Pin> + Send>> { + let ctx = ctx.clone(); + let req = req.clone(); + + let res = async move { + let nft_ctx = NftCtx::from_ctx(&ctx, Some(&db_id)).map_to_mm(GetNftInfoError::Internal)?; + + let storage = nft_ctx.lock_db().await?; + for chain in req.chains.iter() { + if !NftTransferHistoryStorageOps::is_initialized(&storage, chain).await? { + NftTransferHistoryStorageOps::init(&storage, chain).await?; + } + } + let mut transfer_history = storage + .get_transfer_history(chains, req.max, req.limit, req.page_number, req.filters) + .await?; + if req.protect_from_spam { + for transfer in &mut transfer_history.transfer_history { + protect_from_history_spam_links(transfer, true)?; + } + } + process_transfers_confirmations(&ctx, req.chains, &mut transfer_history).await?; + + Ok(NftsTransferHistoryLists { transfer_history, pubkey: db_id }) + }; + + Box::pin(res) + }; + + let future_list = db_ids + .into_iter() + .filter_map(|(id, chains)| { + if !chains.is_empty() { + Some(get_nft_transfers(id, chains)) + } else { + None + } + }) + .collect::>(); + + let mut nft_transfers = vec![]; + for res in join_all(future_list).await { + nft_transfers.push(res?); } - process_transfers_confirmations(&ctx, req.chains, &mut transfer_history_list).await?; - Ok(transfer_history_list) + + Ok(nft_transfers) } async fn process_transfers_confirmations( @@ -234,7 +265,7 @@ async fn process_transfers_confirmations( MmCoinEnum::EthCoin(eth_coin) => { let current_block = current_block_impl(eth_coin).await?; Ok((ticker, current_block)) - }, + } _ => MmError::err(TransferConfirmationsError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }), @@ -284,7 +315,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft return MmError::err(UpdateNftError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }); - }, + } }; let nft_transfers = get_moralis_nft_transfers(&ctx, chain, from_block, &req.url, eth_coin).await?; storage.add_transfers_to_history(*chain, nft_transfers).await?; @@ -299,7 +330,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - }, + } Err(_) => { // if there is an error, then NFT LIST table doesnt exist, so we need to cache nft list from moralis. 
NftListStorageOps::init(&storage, chain).await?; @@ -309,7 +340,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - }, + } }; let scanned_block = storage @@ -334,7 +365,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft &req.url, &req.url_antispam, ) - .await?; + .await?; update_nft_global_in_coins_ctx(&ctx, &storage, *chain).await?; update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; update_spam(&storage, *chain, &req.url_antispam).await?; @@ -348,17 +379,17 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft /// This function uses the up-to-date NFT list for a given chain and updates the /// corresponding global NFT information in the coins context. async fn update_nft_global_in_coins_ctx(ctx: &MmArc, storage: &T, chain: Chain) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let coins_ctx = CoinsContext::from_ctx(ctx).map_to_mm(UpdateNftError::Internal)?; let mut coins = coins_ctx.coins.lock().await; let ticker = chain.to_nft_ticker(); if let Some(MmCoinStruct { - inner: MmCoinEnum::EthCoin(nft_global), - .. - }) = coins.get_mut(ticker) + inner: MmCoinEnum::EthCoin(nft_global), + .. + }) = coins.get_mut(ticker) { let nft_list = storage.get_nft_list(vec![chain], true, 1, None, None).await?; update_nft_infos(nft_global, nft_list.nfts).await; @@ -402,8 +433,8 @@ async fn update_nft_infos(nft_global: &mut EthCoin, nft_list: Vec) { /// `update_spam` function updates spam contracts info in NFT list and NFT transfers. 
async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addresses = storage.get_token_addresses(chain).await?; if !token_addresses.is_empty() { @@ -429,8 +460,8 @@ where } async fn update_phishing(storage: &T, chain: &Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_domains = storage.get_domains(chain).await?; let nft_domains = storage.get_animation_external_domains(chain).await?; @@ -519,7 +550,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu &req.url, &req.url_antispam, ) - .await + .await { Ok(moralis_meta) => moralis_meta, Err(_) => { @@ -530,7 +561,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu .update_transfer_spam_by_token_address(&req.chain, token_address_str.clone(), true) .await?; return Ok(()); - }, + } }; let mut nft_db = storage .get_nft(&req.chain, token_address_str.clone(), req.token_id.clone()) @@ -546,7 +577,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu moralis_meta.common.metadata.as_deref(), &req.url_antispam, ) - .await; + .await; // Gather domains for phishing checks let domains = gather_domains(&token_domain, &uri_meta); nft_db.common.collection_name = moralis_meta.common.collection_name; @@ -574,8 +605,8 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu /// The `update_transfer_meta_using_nft` function updates the transfer metadata associated with the given NFT. /// If metadata info contains potential spam links, function sets `possible_spam` true. 
async fn update_transfer_meta_using_nft(storage: &T, chain: &Chain, nft: &mut Nft) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_meta = TransferMeta::from(nft.clone()); storage @@ -609,8 +640,8 @@ async fn refresh_possible_spam( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let address_hex = eth_addr_to_hex(&nft_db.common.token_address); let spam_res = send_spam_request(chain, url_antispam, address_hex.clone()).await?; @@ -634,8 +665,8 @@ async fn refresh_possible_phishing( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { if !domains.is_empty() { let domain_list = domains.into_iter().collect::>().join(","); @@ -887,9 +918,9 @@ async fn get_fee_details(eth_coin: &EthCoin, transaction_hash: &str) -> Option None, } } @@ -1047,11 +1078,11 @@ async fn handle_nft_transfer handle_send_erc721(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc721) => { handle_receive_erc721(storage, chain, transfer, url, url_antispam, my_address).await - }, + } (TransferStatus::Send, ContractType::Erc1155) => handle_send_erc1155(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc1155) => { handle_receive_erc1155(storage, chain, transfer, url, url_antispam, my_address).await - }, + } } } @@ -1108,7 +1139,7 @@ async fn handle_receive_erc721 { let mut nft = match get_moralis_metadata( token_address_str.clone(), @@ -1117,7 +1148,7 @@ async fn handle_receive_erc721 { // sometimes moralis updates Get All NFTs (which also affects Get Metadata) later @@ -1126,16 +1157,16 @@ async fn handle_receive_erc721 { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? - }, + } }; storage .add_nfts_to_list(*chain, vec![nft.clone()], transfer.block_number) .await?; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; - }, + } } Ok(()) } @@ -1158,19 +1189,19 @@ async fn handle_send_erc1155 { nft_db.common.amount -= transfer.common.amount; storage .update_nft_amount(chain, nft_db.clone(), transfer.block_number) .await?; - }, + } Ordering::Less => { return MmError::err(UpdateNftError::InsufficientAmountInCache { amount_list: nft_db.common.amount.to_string(), amount_history: transfer.common.amount.to_string(), }); - }, + } } Ok(()) } @@ -1200,7 +1231,7 @@ async fn handle_receive_erc1155 { let nft = match get_moralis_metadata( @@ -1210,20 +1241,20 @@ async fn handle_receive_erc1155 { create_nft_from_moralis_metadata(moralis_meta, &transfer, my_address, chain, url_antispam).await? - }, + } Err(_) => { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? 
- }, + } }; storage .add_nfts_to_list(*chain, [nft.clone()], transfer.block_number) .await?; nft - }, + } }; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; Ok(()) @@ -1243,7 +1274,7 @@ async fn create_nft_from_moralis_metadata( moralis_meta.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let nft = Nft { common: NftCommon { token_address: moralis_meta.common.token_address, @@ -1316,8 +1347,8 @@ async fn cache_nfts_from_moralis(storage: &T, chain: &Chain, nfts: Vec) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { for mut nft in nfts.into_iter() { update_transfer_meta_using_nft(storage, chain, &mut nft).await?; @@ -1332,8 +1363,8 @@ async fn update_transfers_with_empty_meta( url: &Url, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addr_id = storage.get_transfers_with_empty_meta(*chain).await?; for addr_id_pair in token_addr_id.into_iter() { @@ -1344,7 +1375,7 @@ where url, url_antispam, ) - .await + .await { Ok(nft_meta) => nft_meta, Err(_) => { @@ -1355,7 +1386,7 @@ where .update_transfer_spam_by_token_address(chain, addr_id_pair.token_address, true) .await?; continue; - }, + } }; update_transfer_meta_using_nft(storage, chain, &mut nft_meta).await?; } @@ -1380,7 +1411,7 @@ fn process_text_for_spam_link(text: &mut Option, redact: bool) -> Result *text = Some("URL redacted for user protection".to_string()); } Ok(true) - }, + } _ => Ok(false), } } @@ -1459,7 +1490,7 @@ fn process_metadata_field( ); } Ok(true) - }, + } _ => Ok(false), } } @@ -1476,7 +1507,7 @@ async fn build_nft_from_moralis( nft_moralis.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let token_domain = get_domain_from_url(token_uri.as_deref()); Nft { common: NftCommon { @@ -1544,8 +1575,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN } async fn clear_data_for_chain(storage: &T, chain: &Chain) -> MmResult<(), ClearNftDbError> -where - T: NftListStorageOps + NftTransferHistoryStorageOps, + where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let (is_nft_list_init, is_history_init) = ( NftListStorageOps::is_initialized(storage, chain).await?, diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index 9b2342246a..edf9954fc8 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -202,8 +202,8 @@ impl FromStr for Chain { /// This implementation will use `FromStr` to deserialize `Chain`. impl<'de> Deserialize<'de> for Chain { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; s.parse().map_err(de::Error::custom) @@ -395,9 +395,9 @@ pub(crate) struct NftFromMoralis { pub(crate) struct SerdeStringWrap(pub(crate) T); impl<'de, T> Deserialize<'de> for SerdeStringWrap -where - T: std::str::FromStr, - T::Err: std::fmt::Debug + std::fmt::Display, + where + T: std::str::FromStr, + T::Err: std::fmt::Debug + std::fmt::Display, { fn deserialize>(deserializer: D) -> Result { let value: &str = Deserialize::deserialize(deserializer)?; @@ -512,7 +512,7 @@ pub struct TransactionNftDetails { /// /// The request provides options such as pagination, limiting the number of results, /// and applying specific filters to the history. 
-#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone)] pub struct NftTransfersReq { /// List of chains to fetch the NFT transfer history from. pub(crate) chains: Vec, @@ -638,6 +638,13 @@ pub struct NftsTransferHistoryList { pub(crate) total: usize, } +#[derive(Debug, Serialize)] +pub struct NftsTransferHistoryLists { + pub(crate) transfer_history: NftsTransferHistoryList, + pub(crate) pubkey: String, +} + + /// Filters that can be applied to the NFT transfer history. /// /// Allows filtering based on transaction type (send/receive), date range, @@ -796,16 +803,16 @@ pub(crate) struct PhishingDomainRes { } fn serialize_token_id(token_id: &BigUint, serializer: S) -> Result -where - S: Serializer, + where + S: Serializer, { let token_id_str = token_id.to_string(); serializer.serialize_str(&token_id_str) } fn deserialize_token_id<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; BigUint::from_str(&s).map_err(serde::de::Error::custom) From 879438b871777a03ef96b0ba819b0b8f5d0bd0a8 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 18 Apr 2024 08:12:02 +0100 Subject: [PATCH 045/186] use try_join_all for efficiency --- mm2src/coins/nft.rs | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 5673274aae..990493a7d9 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -27,7 +27,6 @@ use crypto::StandardHDCoinAddress; use ethereum_types::{Address, H256}; use futures::compat::Future01CompatExt; use futures::future::try_join_all; -use futures_util::future::join_all; use mm2_err_handle::map_to_mm::MapToMmResult; use mm2_net::transport::send_post_request_to_uri; use mm2_number::BigUint; @@ -137,12 +136,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult }) .collect::>(); - let mut nft_lists = vec![]; - for res in join_all(future_list).await { - nft_lists.push(res?); - } - - Ok(nft_lists) + try_join_all(future_list).await } /// Retrieves detailed metadata for a specified NFT. 
@@ -238,12 +232,7 @@ pub async fn get_nft_transfers(ctx: MmArc, req: NftTransfersReq) -> MmResult>(); - let mut nft_transfers = vec![]; - for res in join_all(future_list).await { - nft_transfers.push(res?); - } - - Ok(nft_transfers) + try_join_all(future_list).await } async fn process_transfers_confirmations( From 234a2aa4744b011ed9f659a219684c9a80fcbcd8 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 01:37:09 +0100 Subject: [PATCH 046/186] finish implement async_sqlite_storage_vw for multi key support --- mm2src/coins/nft.rs | 161 +-- mm2src/coins/nft/nft_errors.rs | 1 + mm2src/coins/nft/nft_structs.rs | 62 +- mm2src/coins/nft/nft_tests.rs | 52 +- mm2src/coins/nft/storage/db_test_helpers.rs | 8 +- mm2src/coins/nft/storage/sql_storage.rs | 1016 ++++++++++--------- mm2src/coins/nft/storage/wasm/nft_idb.rs | 1 + mm2src/mm2_core/src/mm_ctx.rs | 22 +- mm2src/mm2_db/src/indexed_db/indexed_db.rs | 101 +- mm2src/mm2_main/src/lp_native_dex.rs | 7 +- mm2src/mm2_test_helpers/src/for_tests.rs | 4 +- 11 files changed, 764 insertions(+), 671 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 990493a7d9..f32fb636b4 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -6,8 +6,7 @@ pub(crate) mod nft_errors; pub mod nft_structs; pub(crate) mod storage; -#[cfg(any(test, target_arch = "wasm32"))] -mod nft_tests; +#[cfg(any(test, target_arch = "wasm32"))] mod nft_tests; use crate::{coin_conf, get_my_address, lp_coinfind_or_err, CoinsContext, MarketCoinOps, MmCoinEnum, MmCoinStruct, MyAddressReq, WithdrawError}; @@ -20,7 +19,10 @@ use crate::eth::{eth_addr_to_hex, get_eth_address, withdraw_erc1155, withdraw_er EthTxFeeDetails}; use crate::nft::nft_errors::{ClearNftDbError, MetaFromUrlError, ProtectFromSpamError, TransferConfirmationsError, UpdateSpamPhishingError}; -use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, NftCtx, NftInfo, NftLists, NftsTransferHistoryLists, NftTransferCommon, PhishingDomainReq, PhishingDomainRes, RefreshMetadataReq, SpamContractReq, SpamContractRes, TransferMeta, TransferStatus, UriMeta}; +use crate::nft::nft_structs::{build_nft_with_empty_meta, BuildNftFields, ClearNftDbReq, NftCommon, NftCtx, NftInfo, + NftLists, NftTransferCommon, NftsTransferHistoryLists, PhishingDomainReq, + PhishingDomainRes, RefreshMetadataReq, SpamContractReq, SpamContractRes, TransferMeta, + TransferStatus, UriMeta}; use crate::nft::storage::{NftListStorageOps, NftTransferHistoryStorageOps}; use common::parse_rfc3339_to_timestamp; use crypto::StandardHDCoinAddress; @@ -86,12 +88,12 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult .map_to_mm(GetNftInfoError::Internal)?; let get_nfts = - |id: String, chains: Vec| -> Pin> + Send>> { + |id: String, chains: Vec| -> Pin> + Send>> { let ctx_clone = ctx.clone(); let req = req.clone(); let res = async move { - let nft_ctx = NftCtx::from_ctx(&ctx_clone, Some(&id)).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx_clone).map_to_mm(GetNftInfoError::Internal)?; let chains = req .chains @@ -99,7 +101,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult .into_iter() .filter(|c| chains.contains(c)) .collect::>(); - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(Some(&id)).await?; for chain in req.chains.iter() { if !NftListStorageOps::is_initialized(&storage, chain).await? 
{ NftListStorageOps::init(&storage, chain).await?; @@ -116,10 +118,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult } } - Ok(NftLists { - nft_list, - pubkey: id, - }) + Ok(NftLists { nft_list, pubkey: id }) }; Box::pin(res) @@ -147,9 +146,9 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult pub async fn get_nft_metadata(ctx: MmArc, req: NftMetadataReq) -> MmResult { // TODO: db_id let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(db_id.as_deref()).await?; if !NftListStorageOps::is_initialized(&storage, &req.chain).await? { NftListStorageOps::init(&storage, &req.chain).await?; } @@ -186,20 +185,25 @@ pub async fn get_nft_metadata(ctx: MmArc, req: NftMetadataReq) -> MmResult MmResult, GetNftInfoError> { +pub async fn get_nft_transfers( + ctx: MmArc, + req: NftTransfersReq, +) -> MmResult, GetNftInfoError> { let db_ids = find_unique_nft_account_ids(&ctx, req.chains.clone()) .await .map_to_mm(GetNftInfoError::Internal)?; let get_nft_transfers = - |db_id: String, chains: Vec| -> Pin> + Send>> { + |db_id: String, + chains: Vec| + -> Pin> + Send>> { let ctx = ctx.clone(); let req = req.clone(); let res = async move { - let nft_ctx = NftCtx::from_ctx(&ctx, Some(&db_id)).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(Some(&db_id)).await?; for chain in req.chains.iter() { if !NftTransferHistoryStorageOps::is_initialized(&storage, chain).await? { NftTransferHistoryStorageOps::init(&storage, chain).await?; @@ -215,7 +219,10 @@ pub async fn get_nft_transfers(ctx: MmArc, req: NftTransfersReq) -> MmResult { let current_block = current_block_impl(eth_coin).await?; Ok((ticker, current_block)) - } + }, _ => MmError::err(TransferConfirmationsError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }), @@ -284,9 +291,9 @@ async fn process_transfers_confirmations( pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNftError> { // TODO: db_id let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(db_id.as_deref()).await?; for chain in req.chains.iter() { let transfer_history_initialized = NftTransferHistoryStorageOps::is_initialized(&storage, chain).await?; @@ -304,7 +311,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft return MmError::err(UpdateNftError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }); - } + }, }; let nft_transfers = get_moralis_nft_transfers(&ctx, chain, from_block, &req.url, eth_coin).await?; storage.add_transfers_to_history(*chain, nft_transfers).await?; @@ -319,7 +326,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - } + }, Err(_) => { // if there is an error, then NFT LIST table doesnt exist, so we need to cache nft list from moralis. 
NftListStorageOps::init(&storage, chain).await?; @@ -329,7 +336,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - } + }, }; let scanned_block = storage @@ -354,7 +361,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft &req.url, &req.url_antispam, ) - .await?; + .await?; update_nft_global_in_coins_ctx(&ctx, &storage, *chain).await?; update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; update_spam(&storage, *chain, &req.url_antispam).await?; @@ -368,17 +375,17 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft /// This function uses the up-to-date NFT list for a given chain and updates the /// corresponding global NFT information in the coins context. async fn update_nft_global_in_coins_ctx(ctx: &MmArc, storage: &T, chain: Chain) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let coins_ctx = CoinsContext::from_ctx(ctx).map_to_mm(UpdateNftError::Internal)?; let mut coins = coins_ctx.coins.lock().await; let ticker = chain.to_nft_ticker(); if let Some(MmCoinStruct { - inner: MmCoinEnum::EthCoin(nft_global), - .. - }) = coins.get_mut(ticker) + inner: MmCoinEnum::EthCoin(nft_global), + .. + }) = coins.get_mut(ticker) { let nft_list = storage.get_nft_list(vec![chain], true, 1, None, None).await?; update_nft_infos(nft_global, nft_list.nfts).await; @@ -422,8 +429,8 @@ async fn update_nft_infos(nft_global: &mut EthCoin, nft_list: Vec) { /// `update_spam` function updates spam contracts info in NFT list and NFT transfers. 
async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addresses = storage.get_token_addresses(chain).await?; if !token_addresses.is_empty() { @@ -449,8 +456,8 @@ async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResu } async fn update_phishing(storage: &T, chain: &Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_domains = storage.get_domains(chain).await?; let nft_domains = storage.get_animation_external_domains(chain).await?; @@ -528,9 +535,9 @@ fn prepare_uri_for_blocklist_endpoint( pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResult<(), UpdateNftError> { // TODO: db_id let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(db_id.as_deref()).await?; let token_address_str = eth_addr_to_hex(&req.token_address); let moralis_meta = match get_moralis_metadata( token_address_str.clone(), @@ -539,7 +546,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu &req.url, &req.url_antispam, ) - .await + .await { Ok(moralis_meta) => moralis_meta, Err(_) => { @@ -550,7 +557,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu .update_transfer_spam_by_token_address(&req.chain, token_address_str.clone(), true) .await?; return Ok(()); - } + }, }; let mut nft_db = storage .get_nft(&req.chain, token_address_str.clone(), req.token_id.clone()) @@ -566,7 +573,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu moralis_meta.common.metadata.as_deref(), &req.url_antispam, ) - .await; + .await; // Gather domains for phishing checks let domains = gather_domains(&token_domain, &uri_meta); nft_db.common.collection_name = moralis_meta.common.collection_name; @@ -594,8 +601,8 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu /// The `update_transfer_meta_using_nft` function updates the transfer metadata associated with the given NFT. /// If metadata info contains potential spam links, function sets `possible_spam` true. 
async fn update_transfer_meta_using_nft(storage: &T, chain: &Chain, nft: &mut Nft) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_meta = TransferMeta::from(nft.clone()); storage @@ -629,8 +636,8 @@ async fn refresh_possible_spam( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let address_hex = eth_addr_to_hex(&nft_db.common.token_address); let spam_res = send_spam_request(chain, url_antispam, address_hex.clone()).await?; @@ -654,8 +661,8 @@ async fn refresh_possible_phishing( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { if !domains.is_empty() { let domain_list = domains.into_iter().collect::>().join(","); @@ -907,9 +914,9 @@ async fn get_fee_details(eth_coin: &EthCoin, transaction_hash: &str) -> Option None, } } @@ -1067,11 +1074,11 @@ async fn handle_nft_transfer handle_send_erc721(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc721) => { handle_receive_erc721(storage, chain, transfer, url, url_antispam, my_address).await - } + }, (TransferStatus::Send, ContractType::Erc1155) => handle_send_erc1155(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc1155) => { handle_receive_erc1155(storage, chain, transfer, url, url_antispam, my_address).await - } + }, } } @@ -1128,7 +1135,7 @@ async fn handle_receive_erc721 { let mut nft = match get_moralis_metadata( token_address_str.clone(), @@ -1137,7 +1144,7 @@ async fn handle_receive_erc721 { // sometimes moralis updates Get All NFTs (which also affects Get Metadata) later @@ -1146,16 +1153,16 @@ async fn handle_receive_erc721 { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? - } + }, }; storage .add_nfts_to_list(*chain, vec![nft.clone()], transfer.block_number) .await?; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; - } + }, } Ok(()) } @@ -1178,19 +1185,19 @@ async fn handle_send_erc1155 { nft_db.common.amount -= transfer.common.amount; storage .update_nft_amount(chain, nft_db.clone(), transfer.block_number) .await?; - } + }, Ordering::Less => { return MmError::err(UpdateNftError::InsufficientAmountInCache { amount_list: nft_db.common.amount.to_string(), amount_history: transfer.common.amount.to_string(), }); - } + }, } Ok(()) } @@ -1220,7 +1227,7 @@ async fn handle_receive_erc1155 { let nft = match get_moralis_metadata( @@ -1230,20 +1237,20 @@ async fn handle_receive_erc1155 { create_nft_from_moralis_metadata(moralis_meta, &transfer, my_address, chain, url_antispam).await? - } + }, Err(_) => { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? 
- } + }, }; storage .add_nfts_to_list(*chain, [nft.clone()], transfer.block_number) .await?; nft - } + }, }; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; Ok(()) @@ -1263,7 +1270,7 @@ async fn create_nft_from_moralis_metadata( moralis_meta.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let nft = Nft { common: NftCommon { token_address: moralis_meta.common.token_address, @@ -1336,8 +1343,8 @@ async fn cache_nfts_from_moralis(storage: &T, chain: &Chain, nfts: Vec) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { for mut nft in nfts.into_iter() { update_transfer_meta_using_nft(storage, chain, &mut nft).await?; @@ -1352,8 +1359,8 @@ async fn update_transfers_with_empty_meta( url: &Url, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addr_id = storage.get_transfers_with_empty_meta(*chain).await?; for addr_id_pair in token_addr_id.into_iter() { @@ -1364,7 +1371,7 @@ async fn update_transfers_with_empty_meta( url, url_antispam, ) - .await + .await { Ok(nft_meta) => nft_meta, Err(_) => { @@ -1375,7 +1382,7 @@ async fn update_transfers_with_empty_meta( .update_transfer_spam_by_token_address(chain, addr_id_pair.token_address, true) .await?; continue; - } + }, }; update_transfer_meta_using_nft(storage, chain, &mut nft_meta).await?; } @@ -1400,7 +1407,7 @@ fn process_text_for_spam_link(text: &mut Option, redact: bool) -> Result *text = Some("URL redacted for user protection".to_string()); } Ok(true) - } + }, _ => Ok(false), } } @@ -1479,7 +1486,7 @@ fn process_metadata_field( ); } Ok(true) - } + }, _ => Ok(false), } } @@ -1496,7 +1503,7 @@ async fn build_nft_from_moralis( nft_moralis.common.metadata.as_deref(), url_antispam, ) - .await; + .await; let token_domain = get_domain_from_url(token_uri.as_deref()); Nft { common: NftCommon { @@ -1535,8 +1542,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN // TODO: db_id let db_id: Option = None; if req.clear_all { - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db(db_id.as_deref()).await?; storage.clear_all_nft_data().await?; storage.clear_all_history_data().await?; return Ok(()); @@ -1548,8 +1555,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN )); } - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db(db_id.as_deref()).await?; let mut errors = Vec::new(); for chain in req.chains.iter() { if let Err(e) = clear_data_for_chain(&storage, chain).await { @@ -1564,8 +1571,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN } async fn clear_data_for_chain(storage: &T, chain: &Chain) -> MmResult<(), ClearNftDbError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let (is_nft_list_init, is_history_init) = ( NftListStorageOps::is_initialized(storage, chain).await?, diff --git a/mm2src/coins/nft/nft_errors.rs 
b/mm2src/coins/nft/nft_errors.rs index 8a6fb90f19..47ea023eee 100644 --- a/mm2src/coins/nft/nft_errors.rs +++ b/mm2src/coins/nft/nft_errors.rs @@ -345,6 +345,7 @@ pub enum LockDBError { /// Errors related to SQL operations in non-WASM environments. #[cfg(not(target_arch = "wasm32"))] SqlError(SqlError), + InternalError(String), } #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index edf9954fc8..d0c237de3c 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -24,6 +24,7 @@ use crate::{TransactionType, TxFeeDetails, WithdrawFee}; cfg_native! { use db_common::async_sql_conn::AsyncConnection; use futures::lock::Mutex as AsyncMutex; + use mm2_core::mm_ctx::{AsyncConnectionArc, log_sqlite_file_open_attempt}; } cfg_wasm32! { @@ -202,8 +203,8 @@ impl FromStr for Chain { /// This implementation will use `FromStr` to deserialize `Chain`. impl<'de> Deserialize<'de> for Chain { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; s.parse().map_err(de::Error::custom) @@ -395,9 +396,9 @@ pub(crate) struct NftFromMoralis { pub(crate) struct SerdeStringWrap(pub(crate) T); impl<'de, T> Deserialize<'de> for SerdeStringWrap - where - T: std::str::FromStr, - T::Err: std::fmt::Debug + std::fmt::Display, +where + T: std::str::FromStr, + T::Err: std::fmt::Debug + std::fmt::Display, { fn deserialize>(deserializer: D) -> Result { let value: &str = Deserialize::deserialize(deserializer)?; @@ -644,7 +645,6 @@ pub struct NftsTransferHistoryLists { pub(crate) pubkey: String, } - /// Filters that can be applied to the NFT transfer history. /// /// Allows filtering based on transaction type (send/receive), date range, @@ -719,6 +719,8 @@ impl From for TransferMeta { } } } +#[cfg(not(target_arch = "wasm32"))] +pub struct NftCacheDbSql(pub AsyncConnection); /// The primary context for NFT operations within the MM environment. /// @@ -730,8 +732,8 @@ pub(crate) struct NftCtx { #[cfg(target_arch = "wasm32")] pub(crate) nft_cache_db: SharedDb, #[cfg(not(target_arch = "wasm32"))] - pub(crate) nft_cache_db: Arc>, - _db_id: Option, + pub(crate) nft_cache_dbs: Arc>>, + pub(crate) ctx: MmArc, } impl NftCtx { @@ -739,24 +741,25 @@ impl NftCtx { /// /// If an `NftCtx` instance doesn't already exist in the MM context, it gets created and cached for subsequent use. 
#[cfg(not(target_arch = "wasm32"))] - pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { let async_sqlite_connection = ctx - .async_sqlite_connection + .async_sqlite_connection_v2 .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { - nft_cache_db: async_sqlite_connection.clone(), - _db_id: db_id.map(|e| e.to_string()), + nft_cache_dbs: async_sqlite_connection.clone(), + ctx: ctx.clone(), }) }))) } #[cfg(target_arch = "wasm32")] - pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { + let db_id: Option<&str> = None; // TODO Ok(NftCtx { - nft_cache_db: ConstructibleDb::new(ctx, db_id).into_shared(), - _db_id: db_id.map(|e| e.to_string()), + nft_cache_dbs: ConstructibleDb::new(ctx, db_id).into_shared(), + ctx: ctx.clone(), }) }))) } @@ -765,8 +768,27 @@ impl NftCtx { #[cfg(not(target_arch = "wasm32"))] pub(crate) async fn lock_db( &self, + db_id: Option<&str>, ) -> MmResult { - Ok(self.nft_cache_db.lock().await) + let db_id = db_id.map(|s| s.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); + + let mut connections = self.nft_cache_dbs.lock().await; + if let Some(async_conn) = connections.get(&db_id) { + let conn = NftCacheDbSql(async_conn.lock().await.clone()); + Ok(conn) + } else { + let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join("KOMODEFI.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + let async_conn = Arc::new(AsyncMutex::new( + AsyncConnection::open(sqlite_file_path) + .await + .map_to_mm(|e| LockDBError::InternalError(e.to_string()))?, + )); + connections.insert(db_id.to_string(), async_conn.clone()); + + let conn = NftCacheDbSql(async_conn.lock().await.clone()); + Ok(conn) + } } #[cfg(target_arch = "wasm32")] @@ -803,16 +825,16 @@ pub(crate) struct PhishingDomainRes { } fn serialize_token_id(token_id: &BigUint, serializer: S) -> Result - where - S: Serializer, +where + S: Serializer, { let token_id_str = token_id.to_string(); serializer.serialize_str(&token_id_str) } fn deserialize_token_id<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, +where + D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; BigUint::from_str(&s).map_err(serde::de::Error::custom) diff --git a/mm2src/coins/nft/nft_tests.rs b/mm2src/coins/nft/nft_tests.rs index 926932ec3f..d9943d61c4 100644 --- a/mm2src/coins/nft/nft_tests.rs +++ b/mm2src/coins/nft/nft_tests.rs @@ -158,7 +158,7 @@ cross_test!(test_camo, { cross_test!(test_add_get_nfts, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -175,7 +175,7 @@ cross_test!(test_add_get_nfts, { cross_test!(test_last_nft_block, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -190,7 +190,7 @@ cross_test!(test_last_nft_block, { cross_test!(test_nft_list, { let chain = Chain::Bsc; let nft_ctx = 
get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -210,7 +210,7 @@ cross_test!(test_nft_list, { cross_test!(test_remove_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -235,7 +235,7 @@ cross_test!(test_remove_nft, { cross_test!(test_nft_amount, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let mut nft = nft(); storage @@ -273,7 +273,7 @@ cross_test!(test_nft_amount, { cross_test!(test_refresh_metadata, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let new_symbol = "NEW_SYMBOL"; let mut nft = nft(); @@ -293,7 +293,7 @@ cross_test!(test_refresh_metadata, { cross_test!(test_update_nft_spam_by_token_address, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -314,7 +314,7 @@ cross_test!(test_update_nft_spam_by_token_address, { cross_test!(test_exclude_nft_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -333,7 +333,7 @@ cross_test!(test_exclude_nft_spam, { cross_test!(test_get_animation_external_domains, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -347,7 +347,7 @@ cross_test!(test_get_animation_external_domains, { cross_test!(test_update_nft_phishing_by_domain, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -375,7 +375,7 @@ cross_test!(test_update_nft_phishing_by_domain, { cross_test!(test_exclude_nft_phishing_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 
28056726).await.unwrap(); @@ -399,7 +399,7 @@ cross_test!(test_exclude_nft_phishing_spam, { cross_test!(test_clear_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft = nft(); storage.add_nfts_to_list(chain, vec![nft], 28056726).await.unwrap(); @@ -411,7 +411,7 @@ cross_test!(test_clear_nft, { cross_test!(test_clear_all_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft = nft(); storage.add_nfts_to_list(chain, vec![nft], 28056726).await.unwrap(); @@ -441,7 +441,7 @@ async fn test_clear_nft_target(storage: &S, chain: &Chain) cross_test!(test_add_get_transfers, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -468,7 +468,7 @@ cross_test!(test_add_get_transfers, { cross_test!(test_last_transfer_block, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -483,7 +483,7 @@ cross_test!(test_last_transfer_block, { cross_test!(test_transfer_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -502,7 +502,7 @@ cross_test!(test_transfer_history, { cross_test!(test_transfer_history_filters, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -564,7 +564,7 @@ cross_test!(test_transfer_history_filters, { cross_test!(test_get_update_transfer_meta, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -599,7 +599,7 @@ cross_test!(test_get_update_transfer_meta, { cross_test!(test_update_transfer_spam_by_token_address, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ 
-620,7 +620,7 @@ cross_test!(test_update_transfer_spam_by_token_address, { cross_test!(test_get_token_addresses, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -632,7 +632,7 @@ cross_test!(test_get_token_addresses, { cross_test!(test_exclude_transfer_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -655,7 +655,7 @@ cross_test!(test_exclude_transfer_spam, { cross_test!(test_get_domains, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -669,7 +669,7 @@ cross_test!(test_get_domains, { cross_test!(test_update_transfer_phishing_by_domain, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -697,7 +697,7 @@ cross_test!(test_update_transfer_phishing_by_domain, { cross_test!(test_exclude_transfer_phishing_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -740,7 +740,7 @@ cross_test!(test_exclude_transfer_phishing_spam, { cross_test!(test_clear_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -752,7 +752,7 @@ cross_test!(test_clear_history, { cross_test!(test_clear_all_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); diff --git a/mm2src/coins/nft/storage/db_test_helpers.rs b/mm2src/coins/nft/storage/db_test_helpers.rs index e188f66c8f..d59b845661 100644 --- a/mm2src/coins/nft/storage/db_test_helpers.rs +++ b/mm2src/coins/nft/storage/db_test_helpers.rs @@ -355,10 +355,8 @@ pub(crate) fn nft_transfer_history() -> Vec { pub(crate) async fn get_nft_ctx(_chain: &Chain) -> Arc { #[cfg(not(target_arch = "wasm32"))] 
- let ctx = mm_ctx_with_custom_async_db().await; + let ctx = mm_ctx_with_custom_async_db().await; #[cfg(target_arch = "wasm32")] - let ctx = mm_ctx_with_custom_db(); - let nft_ctx = NftCtx::from_ctx(&ctx, None).unwrap(); - - nft_ctx + let ctx = mm_ctx_with_custom_db(); + NftCtx::from_ctx(&ctx).unwrap() } diff --git a/mm2src/coins/nft/storage/sql_storage.rs b/mm2src/coins/nft/storage/sql_storage.rs index 6844b261d9..9229ec1856 100644 --- a/mm2src/coins/nft/storage/sql_storage.rs +++ b/mm2src/coins/nft/storage/sql_storage.rs @@ -1,18 +1,17 @@ use crate::nft::eth_addr_to_hex; -use crate::nft::nft_structs::{Chain, ContractType, ConvertChain, Nft, NftCommon, NftList, NftListFilters, - NftTokenAddrId, NftTransferCommon, NftTransferHistory, NftTransferHistoryFilters, - NftsTransferHistoryList, TransferMeta, UriMeta}; +use crate::nft::nft_structs::{Chain, ContractType, ConvertChain, Nft, NftCacheDbSql, NftCommon, NftList, + NftListFilters, NftTokenAddrId, NftTransferCommon, NftTransferHistory, + NftTransferHistoryFilters, NftsTransferHistoryList, TransferMeta, UriMeta}; use crate::nft::storage::{get_offset_limit, NftDetailsJson, NftListStorageOps, NftStorageError, NftTransferHistoryStorageOps, RemoveNftResult, TransferDetailsJson}; use async_trait::async_trait; -use db_common::async_sql_conn::{AsyncConnError, AsyncConnection}; +use db_common::async_sql_conn::AsyncConnError; use db_common::sql_build::{SqlCondition, SqlQuery}; use db_common::sqlite::rusqlite::types::{FromSqlError, Type}; use db_common::sqlite::rusqlite::{Connection, Error as SqlError, Result as SqlResult, Row, Statement}; use db_common::sqlite::sql_builder::SqlBuilder; use db_common::sqlite::{query_single_row, string_from_row, SafeTableName, CHECK_TABLE_EXISTS_SQL}; use ethereum_types::Address; -use futures::lock::MutexGuard as AsyncMutexGuard; use mm2_err_handle::prelude::*; use mm2_number::{BigDecimal, BigUint}; use serde_json::Value as Json; @@ -547,35 +546,37 @@ fn is_table_empty(conn: &Connection, safe_table_name: SafeTableName) -> Result { +impl NftListStorageOps for NftCacheDbSql { type Error = AsyncConnError; async fn init(&self, chain: &Chain) -> MmResult<(), Self::Error> { let sql_nft_list = create_nft_list_table_sql(chain)?; - self.call(move |conn| { - conn.execute(&sql_nft_list, []).map(|_| ())?; - conn.execute(&create_scanned_nft_blocks_sql()?, []).map(|_| ())?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + conn.execute(&sql_nft_list, []).map(|_| ())?; + conn.execute(&create_scanned_nft_blocks_sql()?, []).map(|_| ())?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn is_initialized(&self, chain: &Chain) -> MmResult { let table_name = chain.nft_list_table_name()?; - self.call(move |conn| { - let nft_list_initialized = - query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; - let scanned_nft_blocks_initialized = query_single_row( - conn, - CHECK_TABLE_EXISTS_SQL, - [scanned_nft_blocks_table_name()?.inner()], - string_from_row, - )?; - Ok(nft_list_initialized.is_some() && scanned_nft_blocks_initialized.is_some()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let nft_list_initialized = + query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; + let scanned_nft_blocks_initialized = query_single_row( + conn, + CHECK_TABLE_EXISTS_SQL, + [scanned_nft_blocks_table_name()?.inner()], + string_from_row, + )?; + Ok(nft_list_initialized.is_some() && 
scanned_nft_blocks_initialized.is_some()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nft_list( @@ -586,33 +587,34 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { page_number: Option, filters: Option, ) -> MmResult { - self.call(move |conn| { - let sql_builder = get_nft_list_builder_preimage(chains, filters)?; - let total_count_builder_sql = sql_builder - .clone() - .count("*") - .sql() - .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; - let total: isize = conn - .prepare(&total_count_builder_sql)? - .query_row([], |row| row.get(0))?; - let count_total = total.try_into().expect("count should not be failed"); - - let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); - let sql = finalize_sql_builder(sql_builder, offset, limit)?; - let nfts = conn - .prepare(&sql)? - .query_map([], nft_from_row)? - .collect::, _>>()?; - let result = NftList { - nfts, - skipped: offset, - total: count_total, - }; - Ok(result) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_builder = get_nft_list_builder_preimage(chains, filters)?; + let total_count_builder_sql = sql_builder + .clone() + .count("*") + .sql() + .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; + let total: isize = conn + .prepare(&total_count_builder_sql)? + .query_row([], |row| row.get(0))?; + let count_total = total.try_into().expect("count should not be failed"); + + let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); + let sql = finalize_sql_builder(sql_builder, offset, limit)?; + let nfts = conn + .prepare(&sql)? + .query_map([], nft_from_row)? + .collect::, _>>()?; + let result = NftList { + nfts, + skipped: offset, + total: count_total, + }; + Ok(result) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn add_nfts_to_list(&self, chain: Chain, nfts: I, last_scanned_block: u64) -> MmResult<(), Self::Error> @@ -620,55 +622,56 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { I: IntoIterator + Send + 'static, I::IntoIter: Send, { - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - - for nft in nfts { - let details_json = NftDetailsJson { - owner_of: nft.common.owner_of, - token_hash: nft.common.token_hash, - minter_address: nft.common.minter_address, - block_number_minted: nft.block_number_minted, - }; - let details_json = json::to_string(&details_json).expect("serialization should not fail"); - let params = [ - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - Some(nft.chain.to_string()), - Some(nft.common.amount.to_string()), - Some(nft.block_number.to_string()), - Some(nft.contract_type.to_string()), - Some(i32::from(nft.common.possible_spam).to_string()), - Some(i32::from(nft.possible_phishing).to_string()), - nft.common.collection_name, - nft.common.symbol, - nft.common.token_uri, - nft.common.token_domain, - nft.common.metadata, - nft.common.last_token_uri_sync, - nft.common.last_metadata_sync, - nft.uri_meta.raw_image_url, - nft.uri_meta.image_url, - nft.uri_meta.image_domain, - nft.uri_meta.token_name, - nft.uri_meta.description, - nft.uri_meta.attributes.map(|v| v.to_string()), - nft.uri_meta.animation_url, - nft.uri_meta.animation_domain, - nft.uri_meta.external_url, - nft.uri_meta.external_domain, - nft.uri_meta.image_details.map(|v| v.to_string()), - Some(details_json), - ]; - sql_transaction.execute(&insert_nft_in_list_sql(&chain)?, params)?; - } - let scanned_block_params = 
[chain.to_ticker().to_string(), last_scanned_block.to_string()]; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + + for nft in nfts { + let details_json = NftDetailsJson { + owner_of: nft.common.owner_of, + token_hash: nft.common.token_hash, + minter_address: nft.common.minter_address, + block_number_minted: nft.block_number_minted, + }; + let details_json = json::to_string(&details_json).expect("serialization should not fail"); + let params = [ + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + Some(nft.chain.to_string()), + Some(nft.common.amount.to_string()), + Some(nft.block_number.to_string()), + Some(nft.contract_type.to_string()), + Some(i32::from(nft.common.possible_spam).to_string()), + Some(i32::from(nft.possible_phishing).to_string()), + nft.common.collection_name, + nft.common.symbol, + nft.common.token_uri, + nft.common.token_domain, + nft.common.metadata, + nft.common.last_token_uri_sync, + nft.common.last_metadata_sync, + nft.uri_meta.raw_image_url, + nft.uri_meta.image_url, + nft.uri_meta.image_domain, + nft.uri_meta.token_name, + nft.uri_meta.description, + nft.uri_meta.attributes.map(|v| v.to_string()), + nft.uri_meta.animation_url, + nft.uri_meta.animation_domain, + nft.uri_meta.external_url, + nft.uri_meta.external_domain, + nft.uri_meta.image_details.map(|v| v.to_string()), + Some(details_json), + ]; + sql_transaction.execute(&insert_nft_in_list_sql(&chain)?, params)?; + } + let scanned_block_params = [chain.to_ticker().to_string(), last_scanned_block.to_string()]; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nft( @@ -678,17 +681,18 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { token_id: BigUint, ) -> MmResult, Self::Error> { let table_name = chain.nft_list_table_name()?; - self.call(move |conn| { - let sql = format!( - "SELECT * FROM {} WHERE token_address=?1 AND token_id=?2", - table_name.inner() - ); - let params = [token_address, token_id.to_string()]; - let nft = query_single_row(conn, &sql, params, nft_from_row)?; - Ok(nft) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql = format!( + "SELECT * FROM {} WHERE token_address=?1 AND token_id=?2", + table_name.inner() + ); + let params = [token_address, token_id.to_string()]; + let nft = query_single_row(conn, &sql, params, nft_from_row)?; + Ok(nft) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn remove_nft_from_list( @@ -702,21 +706,22 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { let sql = delete_nft_sql(table_name)?; let params = [token_address, token_id.to_string()]; let scanned_block_params = [chain.to_ticker().to_string(), scanned_block.to_string()]; - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let rows_num = sql_transaction.execute(&sql, params)?; - - let remove_nft_result = if rows_num > 0 { - RemoveNftResult::NftRemoved - } else { - RemoveNftResult::NftDidNotExist - }; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(remove_nft_result) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = 
conn.transaction()?; + let rows_num = sql_transaction.execute(&sql, params)?; + + let remove_nft_result = if rows_num > 0 { + RemoveNftResult::NftRemoved + } else { + RemoveNftResult::NftDidNotExist + }; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + sql_transaction.commit()?; + Ok(remove_nft_result) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nft_amount( @@ -731,74 +736,78 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { table_name.inner() ); let params = [token_address, token_id.to_string()]; - self.call(move |conn| { - let amount = query_single_row(conn, &sql, params, nft_amount_from_row)?; - Ok(amount) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let amount = query_single_row(conn, &sql, params, nft_amount_from_row)?; + Ok(amount) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn refresh_nft_metadata(&self, chain: &Chain, nft: Nft) -> MmResult<(), Self::Error> { let sql = refresh_nft_metadata_sql(chain)?; - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [ - Some(i32::from(nft.common.possible_spam).to_string()), - Some(i32::from(nft.possible_phishing).to_string()), - nft.common.collection_name, - nft.common.symbol, - nft.common.token_uri, - nft.common.token_domain, - nft.common.metadata, - nft.common.last_token_uri_sync, - nft.common.last_metadata_sync, - nft.uri_meta.raw_image_url, - nft.uri_meta.image_url, - nft.uri_meta.image_domain, - nft.uri_meta.token_name, - nft.uri_meta.description, - nft.uri_meta.attributes.map(|v| v.to_string()), - nft.uri_meta.animation_url, - nft.uri_meta.animation_domain, - nft.uri_meta.external_url, - nft.uri_meta.external_domain, - nft.uri_meta.image_details.map(|v| v.to_string()), - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - ]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [ + Some(i32::from(nft.common.possible_spam).to_string()), + Some(i32::from(nft.possible_phishing).to_string()), + nft.common.collection_name, + nft.common.symbol, + nft.common.token_uri, + nft.common.token_domain, + nft.common.metadata, + nft.common.last_token_uri_sync, + nft.common.last_metadata_sync, + nft.uri_meta.raw_image_url, + nft.uri_meta.image_url, + nft.uri_meta.image_domain, + nft.uri_meta.token_name, + nft.uri_meta.description, + nft.uri_meta.attributes.map(|v| v.to_string()), + nft.uri_meta.animation_url, + nft.uri_meta.animation_domain, + nft.uri_meta.external_url, + nft.uri_meta.external_domain, + nft.uri_meta.image_details.map(|v| v.to_string()), + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + ]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_last_block_number(&self, chain: &Chain) -> MmResult, Self::Error> { let table_name = chain.nft_list_table_name()?; let sql = select_last_block_number_sql(table_name)?; - self.call(move |conn| { - let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; - Ok(block_number) - }) - .await? 
- .map(|b| b.try_into()) - .transpose() - .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) + self.0 + .call(move |conn| { + let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; + Ok(block_number) + }) + .await? + .map(|b| b.try_into()) + .transpose() + .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) } async fn get_last_scanned_block(&self, chain: &Chain) -> MmResult, Self::Error> { let sql = select_last_scanned_block_sql()?; let params = [chain.to_ticker()]; - self.call(move |conn| { - let block_number = query_single_row(conn, &sql, params, block_number_from_row)?; - Ok(block_number) - }) - .await? - .map(|b| b.try_into()) - .transpose() - .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) + self.0 + .call(move |conn| { + let block_number = query_single_row(conn, &sql, params, block_number_from_row)?; + Ok(block_number) + }) + .await? + .map(|b| b.try_into()) + .transpose() + .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) } async fn update_nft_amount(&self, chain: &Chain, nft: Nft, scanned_block: u64) -> MmResult<(), Self::Error> { @@ -808,20 +817,21 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { table_name.inner() ); let scanned_block_params = [chain.to_ticker().to_string(), scanned_block.to_string()]; - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [ - Some(nft.common.amount.to_string()), - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - ]; - sql_transaction.execute(&sql, params)?; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [ + Some(nft.common.amount.to_string()), + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + ]; + sql_transaction.execute(&sql, params)?; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_nft_amount_and_block_number(&self, chain: &Chain, nft: Nft) -> MmResult<(), Self::Error> { @@ -831,34 +841,36 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { table_name.inner() ); let scanned_block_params = [chain.to_ticker().to_string(), nft.block_number.to_string()]; - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [ - Some(nft.common.amount.to_string()), - Some(nft.block_number.to_string()), - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - ]; - sql_transaction.execute(&sql, params)?; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [ + Some(nft.common.amount.to_string()), + Some(nft.block_number.to_string()), + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + ]; + sql_transaction.execute(&sql, params)?; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + 
sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nfts_by_token_address(&self, chain: Chain, token_address: String) -> MmResult, Self::Error> { - self.call(move |conn| { - let table_name = chain.nft_list_table_name()?; - let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; - let nfts = stmt - .query_map([token_address], nft_from_row)? - .collect::, _>>()?; - Ok(nfts) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let table_name = chain.nft_list_table_name()?; + let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; + let nfts = stmt + .query_map([token_address], nft_from_row)? + .collect::, _>>()?; + Ok(nfts) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_nft_spam_by_token_address( @@ -872,34 +884,36 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { "UPDATE {} SET possible_spam = ?1 WHERE token_address = ?2;", table_name.inner() ); - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_animation_external_domains(&self, chain: &Chain) -> MmResult, Self::Error> { let safe_table_name = chain.nft_list_table_name()?; - self.call(move |conn| { - let table_name = safe_table_name.inner(); - let sql_query = format!( - "SELECT DISTINCT animation_domain FROM {} UNION SELECT DISTINCT external_domain FROM {}", - table_name, table_name - ); - let mut stmt = conn.prepare(&sql_query)?; - let domains = stmt - .query_map([], |row| row.get::<_, Option>(0))? - .collect::, _>>()?; - let domains = domains.into_iter().flatten().collect(); - Ok(domains) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let table_name = safe_table_name.inner(); + let sql_query = format!( + "SELECT DISTINCT animation_domain FROM {} UNION SELECT DISTINCT external_domain FROM {}", + table_name, table_name + ); + let mut stmt = conn.prepare(&sql_query)?; + let domains = stmt + .query_map([], |row| row.get::<_, Option>(0))? 
+ .collect::, _>>()?; + let domains = domains.into_iter().flatten().collect(); + Ok(domains) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_nft_phishing_by_domain( @@ -914,15 +928,16 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { OR image_domain = ?2 OR animation_domain = ?2 OR external_domain = ?2;", table_name.inner() ); - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_nft_data(&self, chain: &Chain) -> MmResult<(), Self::Error> { @@ -931,61 +946,65 @@ impl NftListStorageOps for AsyncMutexGuard<'_, AsyncConnection> { let table_scanned_blocks = scanned_nft_blocks_table_name()?; let sql_scanned_block = format!("DELETE from {} where chain=?1", table_scanned_blocks.inner()); let scanned_block_param = [chain.to_ticker()]; - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - sql_transaction.execute(&sql_nft, [])?; - sql_transaction.execute(&sql_scanned_block, scanned_block_param)?; - sql_transaction.commit()?; - if is_table_empty(conn, table_scanned_blocks.clone())? { - conn.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), []) - .map(|_| ())?; - } - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&sql_nft, [])?; + sql_transaction.execute(&sql_scanned_block, scanned_block_param)?; + sql_transaction.commit()?; + if is_table_empty(conn, table_scanned_blocks.clone())? 
{ + conn.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), []) + .map(|_| ())?; + } + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_all_nft_data(&self) -> MmResult<(), Self::Error> { - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - for chain in Chain::variant_list().into_iter() { - let table_name = chain.nft_list_table_name()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; - } - let table_scanned_blocks = scanned_nft_blocks_table_name()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), [])?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + for chain in Chain::variant_list().into_iter() { + let table_name = chain.nft_list_table_name()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; + } + let table_scanned_blocks = scanned_nft_blocks_table_name()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), [])?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } } #[async_trait] -impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { +impl NftTransferHistoryStorageOps for NftCacheDbSql { type Error = AsyncConnError; async fn init(&self, chain: &Chain) -> MmResult<(), Self::Error> { let sql_transfer_history = create_transfer_history_table_sql(chain)?; - self.call(move |conn| { - conn.execute(&sql_transfer_history, []).map(|_| ())?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + conn.execute(&sql_transfer_history, []).map(|_| ())?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn is_initialized(&self, chain: &Chain) -> MmResult { let table_name = chain.transfer_history_table_name()?; - self.call(move |conn| { - let nft_list_initialized = - query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; - Ok(nft_list_initialized.is_some()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let nft_list_initialized = + query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; + Ok(nft_list_initialized.is_some()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfer_history( @@ -996,33 +1015,34 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { page_number: Option, filters: Option, ) -> MmResult { - self.call(move |conn| { - let sql_builder = get_nft_transfer_builder_preimage(chains, filters)?; - let total_count_builder_sql = sql_builder - .clone() - .count("*") - .sql() - .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; - let total: isize = conn - .prepare(&total_count_builder_sql)? - .query_row([], |row| row.get(0))?; - let count_total = total.try_into().expect("count should not be failed"); - - let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); - let sql = finalize_sql_builder(sql_builder, offset, limit)?; - let transfers = conn - .prepare(&sql)? - .query_map([], transfer_history_from_row)? 
- .collect::, _>>()?; - let result = NftsTransferHistoryList { - transfer_history: transfers, - skipped: offset, - total: count_total, - }; - Ok(result) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_builder = get_nft_transfer_builder_preimage(chains, filters)?; + let total_count_builder_sql = sql_builder + .clone() + .count("*") + .sql() + .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; + let total: isize = conn + .prepare(&total_count_builder_sql)? + .query_row([], |row| row.get(0))?; + let count_total = total.try_into().expect("count should not be failed"); + + let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); + let sql = finalize_sql_builder(sql_builder, offset, limit)?; + let transfers = conn + .prepare(&sql)? + .query_map([], transfer_history_from_row)? + .collect::, _>>()?; + let result = NftsTransferHistoryList { + transfer_history: transfers, + skipped: offset, + total: count_total, + }; + Ok(result) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn add_transfers_to_history(&self, chain: Chain, transfers: I) -> MmResult<(), Self::Error> @@ -1030,62 +1050,64 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { I: IntoIterator + Send + 'static, I::IntoIter: Send, { - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - for transfer in transfers { - let details_json = TransferDetailsJson { - block_hash: transfer.common.block_hash, - transaction_index: transfer.common.transaction_index, - value: transfer.common.value, - transaction_type: transfer.common.transaction_type, - verified: transfer.common.verified, - operator: transfer.common.operator, - from_address: transfer.common.from_address, - to_address: transfer.common.from_address, - fee_details: transfer.fee_details, - }; - let transfer_json = json::to_string(&details_json).expect("serialization should not fail"); - let params = [ - Some(transfer.common.transaction_hash), - Some(transfer.common.log_index.to_string()), - Some(transfer.chain.to_string()), - Some(transfer.block_number.to_string()), - Some(transfer.block_timestamp.to_string()), - Some(transfer.contract_type.to_string()), - Some(eth_addr_to_hex(&transfer.common.token_address)), - Some(transfer.token_id.to_string()), - Some(transfer.status.to_string()), - Some(transfer.common.amount.to_string()), - transfer.token_uri, - transfer.token_domain, - transfer.collection_name, - transfer.image_url, - transfer.image_domain, - transfer.token_name, - Some(i32::from(transfer.common.possible_spam).to_string()), - Some(i32::from(transfer.possible_phishing).to_string()), - Some(transfer_json), - ]; - sql_transaction.execute(&insert_transfer_in_history_sql(&chain)?, params)?; - } - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + for transfer in transfers { + let details_json = TransferDetailsJson { + block_hash: transfer.common.block_hash, + transaction_index: transfer.common.transaction_index, + value: transfer.common.value, + transaction_type: transfer.common.transaction_type, + verified: transfer.common.verified, + operator: transfer.common.operator, + from_address: transfer.common.from_address, + to_address: transfer.common.from_address, + fee_details: transfer.fee_details, + }; + let transfer_json = json::to_string(&details_json).expect("serialization should not fail"); + let params = [ + Some(transfer.common.transaction_hash), + 
Some(transfer.common.log_index.to_string()), + Some(transfer.chain.to_string()), + Some(transfer.block_number.to_string()), + Some(transfer.block_timestamp.to_string()), + Some(transfer.contract_type.to_string()), + Some(eth_addr_to_hex(&transfer.common.token_address)), + Some(transfer.token_id.to_string()), + Some(transfer.status.to_string()), + Some(transfer.common.amount.to_string()), + transfer.token_uri, + transfer.token_domain, + transfer.collection_name, + transfer.image_url, + transfer.image_domain, + transfer.token_name, + Some(i32::from(transfer.common.possible_spam).to_string()), + Some(i32::from(transfer.possible_phishing).to_string()), + Some(transfer_json), + ]; + sql_transaction.execute(&insert_transfer_in_history_sql(&chain)?, params)?; + } + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_last_block_number(&self, chain: &Chain) -> MmResult, Self::Error> { let table_name = chain.transfer_history_table_name()?; let sql = select_last_block_number_sql(table_name)?; - self.call(move |conn| { - let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; - Ok(block_number) - }) - .await? - .map(|b| b.try_into()) - .transpose() - .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) + self.0 + .call(move |conn| { + let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; + Ok(block_number) + }) + .await? + .map(|b| b.try_into()) + .transpose() + .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) } async fn get_transfers_from_block( @@ -1093,15 +1115,16 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { chain: Chain, from_block: u64, ) -> MmResult, Self::Error> { - self.call(move |conn| { - let mut stmt = get_transfers_from_block_statement(conn, &chain)?; - let transfers = stmt - .query_map([from_block], transfer_history_from_row)? - .collect::, _>>()?; - Ok(transfers) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let mut stmt = get_transfers_from_block_statement(conn, &chain)?; + let transfers = stmt + .query_map([from_block], transfer_history_from_row)? + .collect::, _>>()?; + Ok(transfers) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfers_by_token_addr_id( @@ -1110,15 +1133,16 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { token_address: String, token_id: BigUint, ) -> MmResult, Self::Error> { - self.call(move |conn| { - let mut stmt = get_transfers_by_token_addr_id_statement(conn, chain)?; - let transfers = stmt - .query_map([token_address, token_id.to_string()], transfer_history_from_row)? - .collect::, _>>()?; - Ok(transfers) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let mut stmt = get_transfers_by_token_addr_id_statement(conn, chain)?; + let transfers = stmt + .query_map([token_address, token_id.to_string()], transfer_history_from_row)? 
+ .collect::, _>>()?; + Ok(transfers) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfer_by_tx_hash_and_log_index( @@ -1132,17 +1156,18 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { "SELECT * FROM {} WHERE transaction_hash=?1 AND log_index = ?2", table_name.inner() ); - self.call(move |conn| { - let transfer = query_single_row( - conn, - &sql, - [transaction_hash, log_index.to_string()], - transfer_history_from_row, - )?; - Ok(transfer) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let transfer = query_single_row( + conn, + &sql, + [transaction_hash, log_index.to_string()], + transfer_history_from_row, + )?; + Ok(transfer) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_transfers_meta_by_token_addr_id( @@ -1168,27 +1193,29 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { Some(transfer_meta.token_address), Some(transfer_meta.token_id.to_string()), ]; - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - sql_transaction.execute(&sql, params)?; - if set_spam { - sql_transaction.execute(&sql_spam, params_spam)?; - } - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&sql, params)?; + if set_spam { + sql_transaction.execute(&sql_spam, params_spam)?; + } + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfers_with_empty_meta(&self, chain: Chain) -> MmResult, Self::Error> { - self.call(move |conn| { - let sql_builder = get_transfers_with_empty_meta_builder(conn, &chain)?; - let token_addr_id_pair = sql_builder.query(token_address_id_from_row)?; - Ok(token_addr_id_pair) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_builder = get_transfers_with_empty_meta_builder(conn, &chain)?; + let token_addr_id_pair = sql_builder.query(token_address_id_from_row)?; + Ok(token_addr_id_pair) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfers_by_token_address( @@ -1196,16 +1223,17 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { chain: Chain, token_address: String, ) -> MmResult, Self::Error> { - self.call(move |conn| { - let table_name = chain.transfer_history_table_name()?; - let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; - let transfers = stmt - .query_map([token_address], transfer_history_from_row)? - .collect::, _>>()?; - Ok(transfers) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let table_name = chain.transfer_history_table_name()?; + let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; + let transfers = stmt + .query_map([token_address], transfer_history_from_row)? 
+ .collect::, _>>()?; + Ok(transfers) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_transfer_spam_by_token_address( @@ -1219,47 +1247,50 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { "UPDATE {} SET possible_spam = ?1 WHERE token_address = ?2;", table_name.inner() ); - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_token_addresses(&self, chain: Chain) -> MmResult, Self::Error> { - self.call(move |conn| { - let table_name = chain.transfer_history_table_name()?; - let mut stmt = get_token_addresses_statement(conn, table_name)?; - let addresses = stmt - .query_map([], address_from_row)? - .collect::, _>>()?; - Ok(addresses) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let table_name = chain.transfer_history_table_name()?; + let mut stmt = get_token_addresses_statement(conn, table_name)?; + let addresses = stmt + .query_map([], address_from_row)? + .collect::, _>>()?; + Ok(addresses) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_domains(&self, chain: &Chain) -> MmResult, Self::Error> { let safe_table_name = chain.transfer_history_table_name()?; - self.call(move |conn| { - let table_name = safe_table_name.inner(); - let sql_query = format!( - "SELECT DISTINCT token_domain FROM {} UNION SELECT DISTINCT image_domain FROM {}", - table_name, table_name - ); - let mut stmt = conn.prepare(&sql_query)?; - let domains = stmt - .query_map([], |row| row.get::<_, Option>(0))? - .collect::, _>>()?; - let domains = domains.into_iter().flatten().collect(); - Ok(domains) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let table_name = safe_table_name.inner(); + let sql_query = format!( + "SELECT DISTINCT token_domain FROM {} UNION SELECT DISTINCT image_domain FROM {}", + table_name, table_name + ); + let mut stmt = conn.prepare(&sql_query)?; + let domains = stmt + .query_map([], |row| row.get::<_, Option>(0))? 
+ .collect::, _>>()?; + let domains = domains.into_iter().flatten().collect(); + Ok(domains) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_transfer_phishing_by_domain( @@ -1273,40 +1304,43 @@ impl NftTransferHistoryStorageOps for AsyncMutexGuard<'_, AsyncConnection> { "UPDATE {} SET possible_phishing = ?1 WHERE token_domain = ?2 OR image_domain = ?2;", safe_table_name.inner() ); - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_history_data(&self, chain: &Chain) -> MmResult<(), Self::Error> { let table_name = chain.transfer_history_table_name()?; - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_all_history_data(&self) -> MmResult<(), Self::Error> { - self.call(move |conn| { - let sql_transaction = conn.transaction()?; - for chain in Chain::variant_list().into_iter() { - let table_name = chain.transfer_history_table_name()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; - } - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.0 + .call(move |conn| { + let sql_transaction = conn.transaction()?; + for chain in Chain::variant_list().into_iter() { + let table_name = chain.transfer_history_table_name()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; + } + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } } diff --git a/mm2src/coins/nft/storage/wasm/nft_idb.rs b/mm2src/coins/nft/storage/wasm/nft_idb.rs index 054f1c058e..8e78be3460 100644 --- a/mm2src/coins/nft/storage/wasm/nft_idb.rs +++ b/mm2src/coins/nft/storage/wasm/nft_idb.rs @@ -33,6 +33,7 @@ impl DbInstance for NftCacheIDB { .with_table::() .build() .await?; + Ok(NftCacheIDB { inner }) } } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 8768b63002..1a283dc717 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -40,6 +40,7 @@ cfg_native! { /// Default interval to export and record metrics to log. const EXPORT_METRICS_INTERVAL: f64 = 5. * 60.; +pub type AsyncConnectionArc = Arc>; /// MarketMaker state, shared between the various MarketMaker threads. /// /// Every MarketMaker has one and only one instance of `MmCtx`. @@ -138,9 +139,9 @@ pub struct MmCtx { pub nft_ctx: Mutex>>, /// asynchronous handle for rusqlite connection. 
     #[cfg(not(target_arch = "wasm32"))]
-    pub async_sqlite_connection: Constructible>>,
+    pub async_sqlite_connection: Constructible,
     #[cfg(not(target_arch = "wasm32"))]
-    pub async_sqlite_connection_ctx: Mutex>>,
+    pub async_sqlite_connection_v2: Constructible>>>,
 }
 
 impl MmCtx {
@@ -191,7 +192,7 @@ impl MmCtx {
             #[cfg(not(target_arch = "wasm32"))]
             async_sqlite_connection: Constructible::default(),
             #[cfg(not(target_arch = "wasm32"))]
-            async_sqlite_connection_ctx: Mutex::new(None),
+            async_sqlite_connection_v2: Constructible::default(),
         }
     }
@@ -381,6 +382,19 @@ impl MmCtx {
         Ok(())
     }
 
+    #[cfg(not(target_arch = "wasm32"))]
+    pub async fn init_async_sqlite_connection_v2(&self, db_id: Option<&str>) -> Result<(), String> {
+        let sqlite_file_path = self.dbdir(db_id).join("KOMODEFI.db");
+        log_sqlite_file_open_attempt(&sqlite_file_path);
+        let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await);
+
+        let mut store = HashMap::new();
+        store.insert(self.rmd160_hex(), Arc::new(AsyncMutex::new(async_conn)));
+        try_s!(self.async_sqlite_connection_v2.pin(Arc::new(AsyncMutex::new(store))));
+
+        Ok(())
+    }
+
     #[cfg(not(target_arch = "wasm32"))]
     pub fn sqlite_conn_opt(&self) -> Option> {
         self.sqlite_connection.as_option().map(|conn| conn.lock().unwrap())
@@ -777,7 +791,7 @@ impl MmCtxBuilder {
 }
 
 #[cfg(not(target_arch = "wasm32"))]
-pub(super) fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) {
+pub fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) {
     match sqlite_file_path.canonicalize() {
         Ok(absolute_path) => {
             log::debug!("Trying to open SQLite database file {}", absolute_path.display());
diff --git a/mm2src/mm2_db/src/indexed_db/indexed_db.rs b/mm2src/mm2_db/src/indexed_db/indexed_db.rs
index 9dbdc7391c..6883113c30 100644
--- a/mm2src/mm2_db/src/indexed_db/indexed_db.rs
+++ b/mm2src/mm2_db/src/indexed_db/indexed_db.rs
@@ -34,8 +34,8 @@ macro_rules!
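For reference, `init_async_sqlite_connection_v2` above opens `<dbdir(db_id)>/KOMODEFI.db` and seeds `async_sqlite_connection_v2` with that connection under the node's `rmd160_hex()` key; further account keys are expected to be inserted lazily by callers such as `NftCtx::lock_db`. A small sketch of the assumed per-account path scheme (the helper name is illustrative, not part of the patch):

```rust
use std::path::{Path, PathBuf};

/// Illustrative helper mirroring the per-account layout used by this patch series:
/// <db_root>/<account_key>/KOMODEFI.db, where the account key defaults to the
/// node's rmd160 hex when no explicit db_id is supplied.
fn komodefi_db_path(db_root: &Path, db_id: Option<&str>, rmd160_hex: &str) -> PathBuf {
    db_root.join(db_id.unwrap_or(rmd160_hex)).join("KOMODEFI.db")
}

// e.g. komodefi_db_path(Path::new("DB"), Some("0123abcd"), "deadbeef")
// resolves to DB/0123abcd/KOMODEFI.db on a native build.
```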
try_serialize_index_value { return MmError::err(DbTransactionError::ErrorSerializingIndex { index: $index.to_owned(), description: ser_err.to_string(), - }) - }, + }); + } } }}; } @@ -111,12 +111,15 @@ impl DbIdentifier { } pub fn display_db_id(&self) -> String { self.db_id.clone().unwrap_or_else(|| "KOMODEFI".to_string()) } + + pub fn db_id(&self) -> String { self.db_id.clone().unwrap_or_else(|| hex::encode(H160::default().as_slice())) } } pub struct IndexedDbBuilder { pub db_name: String, pub db_version: u32, pub tables: HashMap, + pub db_id: String, } impl IndexedDbBuilder { @@ -125,6 +128,7 @@ impl IndexedDbBuilder { db_name: db_id.to_string(), db_version: 1, tables: HashMap::new(), + db_id: db_id.db_id(), } } @@ -140,12 +144,13 @@ impl IndexedDbBuilder { } pub async fn build(self) -> InitDbResult { + let db_id = self.db_id.clone(); let (init_tx, init_rx) = oneshot::channel(); let (event_tx, event_rx) = mpsc::unbounded(); self.init_and_spawn(init_tx, event_rx); init_rx.await.expect("The init channel must not be closed")?; - Ok(IndexedDb { event_tx }) + Ok(IndexedDb { event_tx, db_id }) } fn init_and_spawn( @@ -165,7 +170,7 @@ impl IndexedDbBuilder { // ignore if the receiver is closed let _res = init_tx.send(Err(e)); return; - }, + } }; // ignore if the receiver is closed @@ -181,6 +186,7 @@ impl IndexedDbBuilder { pub struct IndexedDb { event_tx: DbEventTx, + db_id: String, } async fn send_event_recv_response( @@ -188,8 +194,8 @@ async fn send_event_recv_response( event: Event, result_rx: oneshot::Receiver>, ) -> MmResult -where - Error: WithInternal + NotMmError, + where + Error: WithInternal + NotMmError, { if let Err(e) = event_tx.unbounded_send(event) { return MmError::err(Error::internal(format!("Error sending event: {}", e))); @@ -226,7 +232,7 @@ impl IndexedDb { // ignore if the receiver is closed result_tx.send(Err(e)).ok(); return; - }, + } }; let (transaction_event_tx, transaction_event_rx) = mpsc::unbounded(); @@ -238,6 +244,10 @@ impl IndexedDb { // ignore if the receiver is closed result_tx.send(Ok(transaction_event_tx)).ok(); } + + pub fn get_db_id(&self) -> String { + self.db_id.to_string() + } } pub struct DbTransaction<'transaction> { @@ -273,10 +283,10 @@ impl DbTransaction<'_> { match event { internal::DbTransactionEvent::OpenTable { table_name, result_tx } => { Self::open_table(&transaction, table_name, result_tx) - }, + } internal::DbTransactionEvent::IsAborted { result_tx } => { result_tx.send(Ok(transaction.aborted())).ok(); - }, + } } } } @@ -292,7 +302,7 @@ impl DbTransaction<'_> { // ignore if the receiver is closed result_tx.send(Err(e)).ok(); return; - }, + } }; let (table_event_tx, table_event_rx) = mpsc::unbounded(); @@ -323,6 +333,7 @@ impl AddOrIgnoreResult { } } } + impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { /// Adds the given item to the table. /// https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/add @@ -344,8 +355,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index_value: Value, item: &Table, ) -> DbTransactionResult - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; match ids.len() { @@ -377,8 +388,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { /// * `index` - the name of a corresponding `Table`'s field by which records will be searched. /// * `index_value` - the value of the `index`, therefore the value of a corresponding `Table`'s field. 
pub async fn get_items(&self, index: &str, index_value: Value) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let (result_tx, result_rx) = oneshot::channel(); let index_value = try_serialize_index_value!(json::to_value(index_value), index); @@ -411,8 +422,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index: &str, index_value: Value, ) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let items = self.get_items(index, index_value).await?; if items.len() > 1 { @@ -443,8 +454,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { /// * `index` - the name of a corresponding `Table`'s field by which records will be searched. /// * `index_value` - the value of the `index`, therefore the value of a corresponding `Table`'s field. pub async fn get_item_ids(&self, index: &str, index_value: Value) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let (result_tx, result_rx) = oneshot::channel(); let index_value = try_serialize_index_value!(json::to_value(index_value), index); @@ -532,8 +543,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index_value: Value, item: &Table, ) -> DbTransactionResult - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; match ids.len() { @@ -541,7 +552,7 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { 1 => { let item_id = ids[0]; self.replace_item(item_id, item).await - }, + } got_items => MmError::err(DbTransactionError::MultipleItemsByUniqueIndex { index: index.to_owned(), got_items, @@ -581,8 +592,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index: &str, index_value: Value, ) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; match ids.len() { @@ -591,7 +602,7 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { let item_id = ids[0]; self.delete_item(item_id).await?; Ok(Some(item_id)) - }, + } got_items => MmError::err(DbTransactionError::MultipleItemsByUniqueIndex { index: index.to_owned(), got_items, @@ -620,8 +631,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index: &str, index_value: Value, ) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; for item_id in ids.iter() { @@ -700,7 +711,7 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.add_item(&item).await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::GetItems { index, index_value, @@ -708,7 +719,7 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.get_items(&index, index_value).await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::GetItemIds { index, index_value, @@ -716,11 +727,11 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.get_item_ids(&index, index_value).await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::GetAllItems { result_tx } => { let res = table.get_all_items().await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::Count { index, index_value, @@ -728,11 +739,11 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.count(&index, index_value).await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::CountAll { 
result_tx } => { let res = table.count_all().await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::ReplaceItem { item_id, item, @@ -740,18 +751,18 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.replace_item(item_id, item).await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::DeleteItem { item_id, result_tx } => { let res = table.delete_item(item_id).await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::Clear { result_tx } => { let res = table.clear().await; result_tx.send(res).ok(); - }, + } internal::DbTableEvent::IsAborted { result_tx } => { result_tx.send(Ok(table.aborted())).ok(); - }, + } internal::DbTableEvent::OpenCursor { index, filters, @@ -759,7 +770,7 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { open_cursor(&table, index, filters, filters_ext, result_tx); - }, + } } } } @@ -804,14 +815,14 @@ fn open_cursor( }); result_tx.send(Err(cursor_err)).ok(); return; - }, + } }; let cursor = match CursorDriver::init_cursor(db_index, filters, filter_ext) { Ok(cursor) => cursor, Err(e) => { result_tx.send(Err(e)).ok(); return; - }, + } }; let (event_tx, event_rx) = mpsc::unbounded(); @@ -845,7 +856,7 @@ pub(crate) fn get_idb_factory() -> Result { } else { "IndexedDB not supported in worker context" } - .to_string(), + .to_string(), )), Err(e) => Err(InitDbError::NotSupported(stringify_js_error(&e))), } @@ -1065,7 +1076,7 @@ mod tests { AddOrIgnoreResult::Added(item_id) => item_id, AddOrIgnoreResult::ExistAlready(unknown_tx_id) => { panic!("Transaction should be added: found '{}'", unknown_tx_id) - }, + } }; let found_tx_id = match table .add_item_or_ignore_by_unique_index("tx_hash", TX_HASH, &tx_2) @@ -1213,7 +1224,7 @@ mod tests { .expect("Couldn't get items by the index 'ticker=RICK'"); assert_eq!(actual_rick_txs, vec![ (rick_tx_1_id, rick_tx_1_updated), - (rick_tx_2_id, rick_tx_2) + (rick_tx_2_id, rick_tx_2), ]); } @@ -1323,16 +1334,16 @@ mod tests { (0, 1) => { let table = upgrader.create_table("upgradable_table")?; table.create_index("first_index", false)?; - }, + } (0, 2) => { let table = upgrader.create_table("upgradable_table")?; table.create_index("first_index", false)?; table.create_index("second_index", false)?; - }, + } (1, 2) => { let table = upgrader.open_table("upgradable_table")?; table.create_index("second_index", false)?; - }, + } v => panic!("Unexpected old, new versions: {:?}", v), } Ok(()) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index a628bfd2e2..4c2c9c0510 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -468,11 +468,14 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { .map_to_mm(MmInitError::Internal)?; for db_id in db_ids.iter() { fix_directories(&ctx, Some(db_id))?; - ctx.init_sqlite_connection(None) + ctx.init_sqlite_connection(Some(db_id)) .map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.init_shared_sqlite_conn() .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_async_sqlite_connection(None) + ctx.init_async_sqlite_connection(Some(db_id)) + .await + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + ctx.init_async_sqlite_connection_v2(Some(db_id)) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; init_and_migrate_sql_db(&ctx, Some(db_id)).await?; diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index b70e2bd718..4b01dcaf10 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ 
b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -1032,7 +1032,9 @@ pub async fn mm_ctx_with_custom_async_db() -> MmArc { let ctx = MmCtxBuilder::new().into_mm_arc(); let connection = AsyncConnection::open_in_memory().await.unwrap(); - let _ = ctx.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(connection))); + let mut store = HashMap::new(); + store.insert(ctx.rmd160_hex(), Arc::new(AsyncMutex::new(connection))); + let _ = ctx.async_sqlite_connection_v2.pin(Arc::new(AsyncMutex::new(store))); ctx } From e2dacfced0c1610d48515b5b7452364b1caf5660 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 05:30:13 +0100 Subject: [PATCH 047/186] finish refactoring nft ctx for multi key support db --- mm2src/coins/nft.rs | 28 +++--- mm2src/coins/nft/nft_structs.rs | 27 +++--- mm2src/coins/nft/nft_tests.rs | 52 ++++++------ mm2src/coins/nft/storage/db_test_helpers.rs | 2 +- mm2src/mm2_core/src/mm_ctx.rs | 4 +- mm2src/mm2_db/src/indexed_db/indexed_db.rs | 94 +++++++++++---------- 6 files changed, 105 insertions(+), 102 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index f32fb636b4..ff2c0c968c 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -93,7 +93,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult let req = req.clone(); let res = async move { - let nft_ctx = NftCtx::from_ctx(&ctx_clone).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx_clone, Some(&id)).map_to_mm(GetNftInfoError::Internal)?; let chains = req .chains @@ -101,7 +101,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult .into_iter() .filter(|c| chains.contains(c)) .collect::>(); - let storage = nft_ctx.lock_db(Some(&id)).await?; + let storage = nft_ctx.lock_db().await?; for chain in req.chains.iter() { if !NftListStorageOps::is_initialized(&storage, chain).await? { NftListStorageOps::init(&storage, chain).await?; @@ -146,9 +146,9 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult pub async fn get_nft_metadata(ctx: MmArc, req: NftMetadataReq) -> MmResult { // TODO: db_id let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db(db_id.as_deref()).await?; + let storage = nft_ctx.lock_db().await?; if !NftListStorageOps::is_initialized(&storage, &req.chain).await? { NftListStorageOps::init(&storage, &req.chain).await?; } @@ -201,9 +201,9 @@ pub async fn get_nft_transfers( let req = req.clone(); let res = async move { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx, Some(&db_id)).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db(Some(&db_id)).await?; + let storage = nft_ctx.lock_db().await?; for chain in req.chains.iter() { if !NftTransferHistoryStorageOps::is_initialized(&storage, chain).await? 
{ NftTransferHistoryStorageOps::init(&storage, chain).await?; @@ -291,9 +291,9 @@ async fn process_transfers_confirmations( pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNftError> { // TODO: db_id let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db(db_id.as_deref()).await?; + let storage = nft_ctx.lock_db().await?; for chain in req.chains.iter() { let transfer_history_initialized = NftTransferHistoryStorageOps::is_initialized(&storage, chain).await?; @@ -535,9 +535,9 @@ fn prepare_uri_for_blocklist_endpoint( pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResult<(), UpdateNftError> { // TODO: db_id let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db(db_id.as_deref()).await?; + let storage = nft_ctx.lock_db().await?; let token_address_str = eth_addr_to_hex(&req.token_address); let moralis_meta = match get_moralis_metadata( token_address_str.clone(), @@ -1542,8 +1542,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN // TODO: db_id let db_id: Option = None; if req.clear_all { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db(db_id.as_deref()).await?; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db().await?; storage.clear_all_nft_data().await?; storage.clear_all_history_data().await?; return Ok(()); @@ -1555,8 +1555,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN )); } - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db(db_id.as_deref()).await?; + let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db().await?; let mut errors = Vec::new(); for chain in req.chains.iter() { if let Err(e) = clear_data_for_chain(&storage, chain).await { diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index d0c237de3c..51972a6790 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -24,7 +24,7 @@ use crate::{TransactionType, TxFeeDetails, WithdrawFee}; cfg_native! { use db_common::async_sql_conn::AsyncConnection; use futures::lock::Mutex as AsyncMutex; - use mm2_core::mm_ctx::{AsyncConnectionArc, log_sqlite_file_open_attempt}; + use mm2_core::mm_ctx::{AsyncConnectionArc, log_sqlite_file_open_attempt, ASYNC_SQLITE_DB_ID}; } cfg_wasm32! { @@ -733,7 +733,8 @@ pub(crate) struct NftCtx { pub(crate) nft_cache_db: SharedDb, #[cfg(not(target_arch = "wasm32"))] pub(crate) nft_cache_dbs: Arc>>, - pub(crate) ctx: MmArc, + _ctx: MmArc, + _current_db_id: String, } impl NftCtx { @@ -741,25 +742,26 @@ impl NftCtx { /// /// If an `NftCtx` instance doesn't already exist in the MM context, it gets created and cached for subsequent use. 
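A note on the `NftCtx` call sites above (an illustration, not a patch hunk): the account id now travels with the context via `from_ctx(&ctx, db_id)` instead of being passed to every `lock_db` call, so call sites reduce to the two-step shape sketched below. One hedged caveat: because `NftCtx` is cached in `ctx.nft_ctx` through `from_ctx`, the `db_id` supplied by the first caller appears to win for the lifetime of the context, and a later call with a different id returns the already-cached instance.

```rust
// Sketch of the post-refactor call shape (error handling reduced to `expect` for brevity).
// `db_id` is an optional account key; `None` falls back to the context's rmd160 hex.
async fn open_nft_storage_for(ctx: &MmArc, db_id: Option<&str>) {
    let nft_ctx = NftCtx::from_ctx(ctx, db_id).expect("NftCtx construction"); // id captured once, here
    let _storage = nft_ctx.lock_db().await.expect("per-account NFT storage"); // no id argument anymore
}
```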
#[cfg(not(target_arch = "wasm32"))] - pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { let async_sqlite_connection = ctx .async_sqlite_connection_v2 .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { nft_cache_dbs: async_sqlite_connection.clone(), - ctx: ctx.clone(), + _current_db_id: db_id.map(|d| d.to_string()).unwrap_or_else(|| ctx.rmd160_hex()), + _ctx: ctx.clone(), }) }))) } #[cfg(target_arch = "wasm32")] - pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { - let db_id: Option<&str> = None; // TODO Ok(NftCtx { - nft_cache_dbs: ConstructibleDb::new(ctx, db_id).into_shared(), - ctx: ctx.clone(), + nft_cache_db: ConstructibleDb::new(ctx, db_id).into_shared(), + _current_db_id: db_id.map(|d| d.to_string()).unwrap_or_else(|| ctx.rmd160_hex()), + _ctx: ctx.clone(), }) }))) } @@ -768,23 +770,20 @@ impl NftCtx { #[cfg(not(target_arch = "wasm32"))] pub(crate) async fn lock_db( &self, - db_id: Option<&str>, ) -> MmResult { - let db_id = db_id.map(|s| s.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let mut connections = self.nft_cache_dbs.lock().await; - if let Some(async_conn) = connections.get(&db_id) { + if let Some(async_conn) = connections.get(&self._current_db_id) { let conn = NftCacheDbSql(async_conn.lock().await.clone()); Ok(conn) } else { - let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join("KOMODEFI.db"); + let sqlite_file_path = self._ctx.dbdir(Some(&self._current_db_id)).join(ASYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let async_conn = Arc::new(AsyncMutex::new( AsyncConnection::open(sqlite_file_path) .await .map_to_mm(|e| LockDBError::InternalError(e.to_string()))?, )); - connections.insert(db_id.to_string(), async_conn.clone()); + connections.insert(self._current_db_id.to_owned(), async_conn.clone()); let conn = NftCacheDbSql(async_conn.lock().await.clone()); Ok(conn) diff --git a/mm2src/coins/nft/nft_tests.rs b/mm2src/coins/nft/nft_tests.rs index d9943d61c4..926932ec3f 100644 --- a/mm2src/coins/nft/nft_tests.rs +++ b/mm2src/coins/nft/nft_tests.rs @@ -158,7 +158,7 @@ cross_test!(test_camo, { cross_test!(test_add_get_nfts, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -175,7 +175,7 @@ cross_test!(test_add_get_nfts, { cross_test!(test_last_nft_block, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -190,7 +190,7 @@ cross_test!(test_last_nft_block, { cross_test!(test_nft_list, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -210,7 +210,7 @@ 
cross_test!(test_nft_list, { cross_test!(test_remove_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -235,7 +235,7 @@ cross_test!(test_remove_nft, { cross_test!(test_nft_amount, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let mut nft = nft(); storage @@ -273,7 +273,7 @@ cross_test!(test_nft_amount, { cross_test!(test_refresh_metadata, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let new_symbol = "NEW_SYMBOL"; let mut nft = nft(); @@ -293,7 +293,7 @@ cross_test!(test_refresh_metadata, { cross_test!(test_update_nft_spam_by_token_address, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -314,7 +314,7 @@ cross_test!(test_update_nft_spam_by_token_address, { cross_test!(test_exclude_nft_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -333,7 +333,7 @@ cross_test!(test_exclude_nft_spam, { cross_test!(test_get_animation_external_domains, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -347,7 +347,7 @@ cross_test!(test_get_animation_external_domains, { cross_test!(test_update_nft_phishing_by_domain, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -375,7 +375,7 @@ cross_test!(test_update_nft_phishing_by_domain, { cross_test!(test_exclude_nft_phishing_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -399,7 +399,7 @@ cross_test!(test_exclude_nft_phishing_spam, { cross_test!(test_clear_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); 
NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft = nft(); storage.add_nfts_to_list(chain, vec![nft], 28056726).await.unwrap(); @@ -411,7 +411,7 @@ cross_test!(test_clear_nft, { cross_test!(test_clear_all_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft = nft(); storage.add_nfts_to_list(chain, vec![nft], 28056726).await.unwrap(); @@ -441,7 +441,7 @@ async fn test_clear_nft_target(storage: &S, chain: &Chain) cross_test!(test_add_get_transfers, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -468,7 +468,7 @@ cross_test!(test_add_get_transfers, { cross_test!(test_last_transfer_block, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -483,7 +483,7 @@ cross_test!(test_last_transfer_block, { cross_test!(test_transfer_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -502,7 +502,7 @@ cross_test!(test_transfer_history, { cross_test!(test_transfer_history_filters, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -564,7 +564,7 @@ cross_test!(test_transfer_history_filters, { cross_test!(test_get_update_transfer_meta, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -599,7 +599,7 @@ cross_test!(test_get_update_transfer_meta, { cross_test!(test_update_transfer_spam_by_token_address, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -620,7 +620,7 @@ cross_test!(test_update_transfer_spam_by_token_address, { cross_test!(test_get_token_addresses, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); 
NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -632,7 +632,7 @@ cross_test!(test_get_token_addresses, { cross_test!(test_exclude_transfer_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -655,7 +655,7 @@ cross_test!(test_exclude_transfer_spam, { cross_test!(test_get_domains, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -669,7 +669,7 @@ cross_test!(test_get_domains, { cross_test!(test_update_transfer_phishing_by_domain, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -697,7 +697,7 @@ cross_test!(test_update_transfer_phishing_by_domain, { cross_test!(test_exclude_transfer_phishing_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -740,7 +740,7 @@ cross_test!(test_exclude_transfer_phishing_spam, { cross_test!(test_clear_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -752,7 +752,7 @@ cross_test!(test_clear_history, { cross_test!(test_clear_all_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db(None).await.unwrap(); + let storage = nft_ctx.lock_db().await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); diff --git a/mm2src/coins/nft/storage/db_test_helpers.rs b/mm2src/coins/nft/storage/db_test_helpers.rs index d59b845661..75c7b248c2 100644 --- a/mm2src/coins/nft/storage/db_test_helpers.rs +++ b/mm2src/coins/nft/storage/db_test_helpers.rs @@ -358,5 +358,5 @@ pub(crate) async fn get_nft_ctx(_chain: &Chain) -> Arc { let ctx = mm_ctx_with_custom_async_db().await; #[cfg(target_arch = "wasm32")] let ctx = mm_ctx_with_custom_db(); - NftCtx::from_ctx(&ctx).unwrap() + NftCtx::from_ctx(&ctx, None).unwrap() } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 1a283dc717..c246d3b2d1 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ 
b/mm2src/mm2_core/src/mm_ctx.rs @@ -39,7 +39,9 @@ cfg_native! { /// Default interval to export and record metrics to log. const EXPORT_METRICS_INTERVAL: f64 = 5. * 60.; +pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; +#[cfg(not(target_arch = "wasm32"))] pub type AsyncConnectionArc = Arc>; /// MarketMaker state, shared between the various MarketMaker threads. /// @@ -382,7 +384,7 @@ impl MmCtx { Ok(()) } - #[cfg(not(target_Arch = "wasm32"))] + #[cfg(not(target_arch = "wasm32"))] pub async fn init_async_sqlite_connection_v2(&self, db_id: Option<&str>) -> Result<(), String> { let sqlite_file_path = self.dbdir(db_id).join("KOMODEFI.db"); log_sqlite_file_open_attempt(&sqlite_file_path); diff --git a/mm2src/mm2_db/src/indexed_db/indexed_db.rs b/mm2src/mm2_db/src/indexed_db/indexed_db.rs index 6883113c30..dd53d4a466 100644 --- a/mm2src/mm2_db/src/indexed_db/indexed_db.rs +++ b/mm2src/mm2_db/src/indexed_db/indexed_db.rs @@ -35,7 +35,7 @@ macro_rules! try_serialize_index_value { index: $index.to_owned(), description: ser_err.to_string(), }); - } + }, } }}; } @@ -112,7 +112,11 @@ impl DbIdentifier { pub fn display_db_id(&self) -> String { self.db_id.clone().unwrap_or_else(|| "KOMODEFI".to_string()) } - pub fn db_id(&self) -> String { self.db_id.clone().unwrap_or_else(|| hex::encode(H160::default().as_slice())) } + pub fn db_id(&self) -> String { + self.db_id + .clone() + .unwrap_or_else(|| hex::encode(H160::default().as_slice())) + } } pub struct IndexedDbBuilder { @@ -170,7 +174,7 @@ impl IndexedDbBuilder { // ignore if the receiver is closed let _res = init_tx.send(Err(e)); return; - } + }, }; // ignore if the receiver is closed @@ -194,8 +198,8 @@ async fn send_event_recv_response( event: Event, result_rx: oneshot::Receiver>, ) -> MmResult - where - Error: WithInternal + NotMmError, +where + Error: WithInternal + NotMmError, { if let Err(e) = event_tx.unbounded_send(event) { return MmError::err(Error::internal(format!("Error sending event: {}", e))); @@ -232,7 +236,7 @@ impl IndexedDb { // ignore if the receiver is closed result_tx.send(Err(e)).ok(); return; - } + }, }; let (transaction_event_tx, transaction_event_rx) = mpsc::unbounded(); @@ -245,9 +249,7 @@ impl IndexedDb { result_tx.send(Ok(transaction_event_tx)).ok(); } - pub fn get_db_id(&self) -> String { - self.db_id.to_string() - } + pub fn get_db_id(&self) -> String { self.db_id.to_string() } } pub struct DbTransaction<'transaction> { @@ -283,10 +285,10 @@ impl DbTransaction<'_> { match event { internal::DbTransactionEvent::OpenTable { table_name, result_tx } => { Self::open_table(&transaction, table_name, result_tx) - } + }, internal::DbTransactionEvent::IsAborted { result_tx } => { result_tx.send(Ok(transaction.aborted())).ok(); - } + }, } } } @@ -302,7 +304,7 @@ impl DbTransaction<'_> { // ignore if the receiver is closed result_tx.send(Err(e)).ok(); return; - } + }, }; let (table_event_tx, table_event_rx) = mpsc::unbounded(); @@ -355,8 +357,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index_value: Value, item: &Table, ) -> DbTransactionResult - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; match ids.len() { @@ -388,8 +390,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { /// * `index` - the name of a corresponding `Table`'s field by which records will be searched. /// * `index_value` - the value of the `index`, therefore the value of a corresponding `Table`'s field. 
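One remark on the mm_ctx.rs hunk above: the `target_Arch` to `target_arch` correction is not cosmetic. cfg keys are case-sensitive and an unset key simply evaluates to false, so the misspelled guard `not(target_Arch = "wasm32")` was true on every target and pulled the native-only function into wasm32 builds as well. A minimal illustration (not from the patch):

```rust
// cfg keys are case-sensitive; an unknown key is "not set" (false), so `not(...)` of it is always true.
#[cfg(not(target_Arch = "wasm32"))] // misspelled guard: compiled on every target, wasm32 included
fn accidentally_everywhere() {}

#[cfg(not(target_arch = "wasm32"))] // intended guard: skipped when building for wasm32
fn native_only() {}
```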
pub async fn get_items(&self, index: &str, index_value: Value) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let (result_tx, result_rx) = oneshot::channel(); let index_value = try_serialize_index_value!(json::to_value(index_value), index); @@ -422,8 +424,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index: &str, index_value: Value, ) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let items = self.get_items(index, index_value).await?; if items.len() > 1 { @@ -454,8 +456,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { /// * `index` - the name of a corresponding `Table`'s field by which records will be searched. /// * `index_value` - the value of the `index`, therefore the value of a corresponding `Table`'s field. pub async fn get_item_ids(&self, index: &str, index_value: Value) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let (result_tx, result_rx) = oneshot::channel(); let index_value = try_serialize_index_value!(json::to_value(index_value), index); @@ -543,8 +545,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index_value: Value, item: &Table, ) -> DbTransactionResult - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; match ids.len() { @@ -552,7 +554,7 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { 1 => { let item_id = ids[0]; self.replace_item(item_id, item).await - } + }, got_items => MmError::err(DbTransactionError::MultipleItemsByUniqueIndex { index: index.to_owned(), got_items, @@ -592,8 +594,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index: &str, index_value: Value, ) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; match ids.len() { @@ -602,7 +604,7 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { let item_id = ids[0]; self.delete_item(item_id).await?; Ok(Some(item_id)) - } + }, got_items => MmError::err(DbTransactionError::MultipleItemsByUniqueIndex { index: index.to_owned(), got_items, @@ -631,8 +633,8 @@ impl<'transaction, Table: TableSignature> DbTable<'transaction, Table> { index: &str, index_value: Value, ) -> DbTransactionResult> - where - Value: Serialize, + where + Value: Serialize, { let ids = self.get_item_ids(index, index_value).await?; for item_id in ids.iter() { @@ -711,7 +713,7 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.add_item(&item).await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::GetItems { index, index_value, @@ -719,7 +721,7 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.get_items(&index, index_value).await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::GetItemIds { index, index_value, @@ -727,11 +729,11 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.get_item_ids(&index, index_value).await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::GetAllItems { result_tx } => { let res = table.get_all_items().await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::Count { index, index_value, @@ -739,11 +741,11 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.count(&index, index_value).await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::CountAll { 
result_tx } => { let res = table.count_all().await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::ReplaceItem { item_id, item, @@ -751,18 +753,18 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { let res = table.replace_item(item_id, item).await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::DeleteItem { item_id, result_tx } => { let res = table.delete_item(item_id).await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::Clear { result_tx } => { let res = table.clear().await; result_tx.send(res).ok(); - } + }, internal::DbTableEvent::IsAborted { result_tx } => { result_tx.send(Ok(table.aborted())).ok(); - } + }, internal::DbTableEvent::OpenCursor { index, filters, @@ -770,7 +772,7 @@ async fn table_event_loop(mut rx: mpsc::UnboundedReceiver { open_cursor(&table, index, filters, filters_ext, result_tx); - } + }, } } } @@ -815,14 +817,14 @@ fn open_cursor( }); result_tx.send(Err(cursor_err)).ok(); return; - } + }, }; let cursor = match CursorDriver::init_cursor(db_index, filters, filter_ext) { Ok(cursor) => cursor, Err(e) => { result_tx.send(Err(e)).ok(); return; - } + }, }; let (event_tx, event_rx) = mpsc::unbounded(); @@ -856,7 +858,7 @@ pub(crate) fn get_idb_factory() -> Result { } else { "IndexedDB not supported in worker context" } - .to_string(), + .to_string(), )), Err(e) => Err(InitDbError::NotSupported(stringify_js_error(&e))), } @@ -1076,7 +1078,7 @@ mod tests { AddOrIgnoreResult::Added(item_id) => item_id, AddOrIgnoreResult::ExistAlready(unknown_tx_id) => { panic!("Transaction should be added: found '{}'", unknown_tx_id) - } + }, }; let found_tx_id = match table .add_item_or_ignore_by_unique_index("tx_hash", TX_HASH, &tx_2) @@ -1334,16 +1336,16 @@ mod tests { (0, 1) => { let table = upgrader.create_table("upgradable_table")?; table.create_index("first_index", false)?; - } + }, (0, 2) => { let table = upgrader.create_table("upgradable_table")?; table.create_index("first_index", false)?; table.create_index("second_index", false)?; - } + }, (1, 2) => { let table = upgrader.open_table("upgradable_table")?; table.create_index("second_index", false)?; - } + }, v => panic!("Unexpected old, new versions: {:?}", v), } Ok(()) From a00c2c7f1259fffab5952df3dfcdf70e728ce69e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 05:45:08 +0100 Subject: [PATCH 048/186] replace async_sqlite_connection with v2 --- mm2src/coins/nft/nft_structs.rs | 2 +- mm2src/mm2_core/src/mm_ctx.rs | 19 +++---------------- mm2src/mm2_main/src/lp_native_dex.rs | 3 --- .../src/rpc/lp_commands/lp_commands_legacy.rs | 11 +++++++---- mm2src/mm2_test_helpers/src/for_tests.rs | 2 +- 5 files changed, 12 insertions(+), 25 deletions(-) diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index 51972a6790..389005d75b 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -745,7 +745,7 @@ impl NftCtx { pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { let async_sqlite_connection = ctx - .async_sqlite_connection_v2 + .async_sqlite_connection .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { nft_cache_dbs: async_sqlite_connection.clone(), diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index c246d3b2d1..cec576d778 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -141,9 +141,7 @@ pub struct MmCtx { pub nft_ctx: Mutex>>, /// asynchronous handle for rusqlite 
connection. #[cfg(not(target_arch = "wasm32"))] - pub async_sqlite_connection: Constructible, - #[cfg(not(target_arch = "wasm32"))] - pub async_sqlite_connection_v2: Constructible>>>, + pub async_sqlite_connection: Constructible>>>, } impl MmCtx { @@ -193,8 +191,6 @@ impl MmCtx { nft_ctx: Mutex::new(None), #[cfg(not(target_arch = "wasm32"))] async_sqlite_connection: Constructible::default(), - #[cfg(not(target_arch = "wasm32"))] - async_sqlite_connection_v2: Constructible::default(), } } @@ -377,22 +373,13 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub async fn init_async_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.dbdir(db_id).join("KOMODEFI.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); - try_s!(self.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(async_conn)))); - Ok(()) - } - - #[cfg(not(target_arch = "wasm32"))] - pub async fn init_async_sqlite_connection_v2(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.dbdir(db_id).join("KOMODEFI.db"); + let sqlite_file_path = self.dbdir(db_id).join(ASYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); let mut store = HashMap::new(); store.insert(self.rmd160_hex(), Arc::new(AsyncMutex::new(async_conn))); - try_s!(self.async_sqlite_connection_v2.pin(Arc::new(AsyncMutex::new(store)))); + try_s!(self.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(store)))); Ok(()) } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 4c2c9c0510..5b53abb807 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -475,9 +475,6 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { ctx.init_async_sqlite_connection(Some(db_id)) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_async_sqlite_connection_v2(Some(db_id)) - .await - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; init_and_migrate_sql_db(&ctx, Some(db_id)).await?; migrate_db(&ctx, Some(db_id))?; } diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs index 16c65a2c01..46b10c2afb 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs @@ -248,10 +248,13 @@ pub async fn my_balance(ctx: MmArc, req: Json) -> Result>, Stri #[cfg(not(target_arch = "wasm32"))] async fn close_async_connection(ctx: &MmArc) { - if let Some(async_conn) = ctx.async_sqlite_connection.as_option() { - let mut conn = async_conn.lock().await; - if let Err(e) = conn.close().await { - error!("Error stopping AsyncConnection: {}", e); + if let Some(connections) = ctx.async_sqlite_connection.as_option() { + let connections = connections.lock().await; + for connection in connections.values() { + let mut conn = connection.lock().await; + if let Err(e) = conn.close().await { + error!("Error stopping AsyncConnection: {}", e); + } } } } diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 4b01dcaf10..35283f9800 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -1034,7 +1034,7 @@ pub async fn mm_ctx_with_custom_async_db() -> MmArc { let connection = AsyncConnection::open_in_memory().await.unwrap(); let mut 
store = HashMap::new(); store.insert(ctx.rmd160_hex(), Arc::new(AsyncMutex::new(connection))); - let _ = ctx.async_sqlite_connection_v2.pin(Arc::new(AsyncMutex::new(store))); + let _ = ctx.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(store))); ctx } From 04e1d973b9ae2a2085ba470ba7ea0fd020c14527 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 06:32:24 +0100 Subject: [PATCH 049/186] rename AsyncConnectionArc to AsyncSqliteConnectionArc --- mm2src/coins/nft/nft_structs.rs | 4 ++-- mm2src/mm2_core/src/mm_ctx.rs | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index 389005d75b..553de5d506 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -24,7 +24,7 @@ use crate::{TransactionType, TxFeeDetails, WithdrawFee}; cfg_native! { use db_common::async_sql_conn::AsyncConnection; use futures::lock::Mutex as AsyncMutex; - use mm2_core::mm_ctx::{AsyncConnectionArc, log_sqlite_file_open_attempt, ASYNC_SQLITE_DB_ID}; + use mm2_core::mm_ctx::{AsyncSqliteConnectionArc, log_sqlite_file_open_attempt, ASYNC_SQLITE_DB_ID}; } cfg_wasm32! { @@ -732,7 +732,7 @@ pub(crate) struct NftCtx { #[cfg(target_arch = "wasm32")] pub(crate) nft_cache_db: SharedDb, #[cfg(not(target_arch = "wasm32"))] - pub(crate) nft_cache_dbs: Arc>>, + pub(crate) nft_cache_dbs: Arc>>, _ctx: MmArc, _current_db_id: String, } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index cec576d778..0c5308cb0e 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -42,7 +42,9 @@ const EXPORT_METRICS_INTERVAL: f64 = 5. * 60.; pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; #[cfg(not(target_arch = "wasm32"))] -pub type AsyncConnectionArc = Arc>; +pub type AsyncSqliteConnectionArc = Arc>; +#[cfg(not(target_arch = "wasm32"))] +pub type SyncSqliteConnectionArc = Arc>; /// MarketMaker state, shared between the various MarketMaker threads. /// /// Every MarketMaker has one and only one instance of `MmCtx`. @@ -141,7 +143,7 @@ pub struct MmCtx { pub nft_ctx: Mutex>>, /// asynchronous handle for rusqlite connection. 
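A hedged review remark on the `init_async_sqlite_connection` hunk above: the file path is derived from `db_id`, but the connection is inserted under `self.rmd160_hex()`. When `db_id` names a non-default account, a later lookup by that id (as `NftCtx::lock_db` performs against the id it captured) would miss and lazily open a second connection to the same file; and since the map is pinned into a `Constructible`, calling the initializer once per account id in the `lp_init_continue` loop presumably only succeeds for the first id. Below is a sketch of an `impl MmCtx` method that keys the entry by the resolved account id; the method name is hypothetical and the surrounding types are assumed to behave as they do in the patch:

```rust
// Hypothetical variant (sketch only, not part of the patch): resolve the account key once
// and use it for both the file path and the map key, so later lookups by `db_id` hit this entry.
#[cfg(not(target_arch = "wasm32"))]
pub async fn init_async_sqlite_connection_keyed(&self, db_id: Option<&str>) -> Result<(), String> {
    let db_id = db_id.map(ToOwned::to_owned).unwrap_or_else(|| self.rmd160_hex());
    let sqlite_file_path = self.dbdir(Some(db_id.as_str())).join(ASYNC_SQLITE_DB_ID);
    log_sqlite_file_open_attempt(&sqlite_file_path);
    let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await);

    let mut store = HashMap::new();
    store.insert(db_id, Arc::new(AsyncMutex::new(async_conn))); // keyed by the resolved id
    try_s!(self.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(store))));
    Ok(())
}
```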
#[cfg(not(target_arch = "wasm32"))] - pub async_sqlite_connection: Constructible>>>, + pub async_sqlite_connection: Constructible>>>, } impl MmCtx { From f904927170600e5c091e276b7646674d93d3c9c5 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 12:38:57 +0100 Subject: [PATCH 050/186] WIP sync_sqlite_connection_v2 --- .../utxo/utxo_block_header_storage/mod.rs | 10 +- mm2src/mm2_core/src/mm_ctx.rs | 101 ++++++++++++++++-- mm2src/mm2_main/src/lp_native_dex.rs | 2 + 3 files changed, 100 insertions(+), 13 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 4fe1bfa2dc..232d59df7d 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -31,16 +31,16 @@ impl BlockHeaderStorage { pub(crate) fn new_from_ctx( ctx: MmArc, ticker: String, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> Result { - let sqlite_connection = ctx.sqlite_connection.ok_or(BlockHeaderStorageError::Internal( - "sqlite_connection is not initialized".to_owned(), - ))?; + let sqlite_connection = ctx + .sqlite_connection_res_v2(db_id) + .map_err(|_| BlockHeaderStorageError::Internal("sqlite_connection is not initialized".to_owned()))?; Ok(BlockHeaderStorage { inner: Box::new(SqliteBlockHeadersStorage { ticker, - conn: sqlite_connection.clone(), + conn: sqlite_connection, }), }) } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 0c5308cb0e..9d27544261 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -18,7 +18,7 @@ use std::collections::HashSet; use std::fmt; use std::future::Future; use std::ops::Deref; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; cfg_wasm32! { use mm2_rpc::wasm_rpc::WasmRpcSender; @@ -34,7 +34,7 @@ cfg_native! { use mm2_metrics::MmMetricsError; use std::net::{IpAddr, SocketAddr, AddrParseError}; use std::path::{Path, PathBuf}; - use std::sync::MutexGuard; + use std::sync::{MutexGuard, Mutex}; } /// Default interval to export and record metrics to log. @@ -44,7 +44,7 @@ pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; #[cfg(not(target_arch = "wasm32"))] pub type AsyncSqliteConnectionArc = Arc>; #[cfg(not(target_arch = "wasm32"))] -pub type SyncSqliteConnectionArc = Arc>; +pub type SyncSqliteConnectionArc = Arc>; /// MarketMaker state, shared between the various MarketMaker threads. /// /// Every MarketMaker has one and only one instance of `MmCtx`. @@ -124,9 +124,14 @@ pub struct MmCtx { /// Deprecated, please use `async_sqlite_connection` for new implementations. #[cfg(not(target_arch = "wasm32"))] pub sqlite_connection: Constructible>>, + #[cfg(not(target_arch = "wasm32"))] + pub sqlite_connection_v2: Constructible>>>, /// Deprecated, please create `shared_async_sqlite_conn` for new implementations and call db `KOMODEFI-shared.db`. #[cfg(not(target_arch = "wasm32"))] pub shared_sqlite_conn: Constructible>>, + /// asynchronous handle for rusqlite connection. + #[cfg(not(target_arch = "wasm32"))] + pub async_sqlite_connection: Constructible>>>, pub mm_version: String, pub datetime: String, pub mm_init_ctx: Mutex>>, @@ -141,9 +146,6 @@ pub struct MmCtx { pub db_namespace: DbNamespaceId, /// The context belonging to the `nft` mod: `NftCtx`. pub nft_ctx: Mutex>>, - /// asynchronous handle for rusqlite connection. 
- #[cfg(not(target_arch = "wasm32"))] - pub async_sqlite_connection: Constructible>>>, } impl MmCtx { @@ -182,7 +184,11 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] sqlite_connection: Constructible::default(), #[cfg(not(target_arch = "wasm32"))] + sqlite_connection_v2: Constructible::default(), + #[cfg(not(target_arch = "wasm32"))] shared_sqlite_conn: Constructible::default(), + #[cfg(not(target_arch = "wasm32"))] + async_sqlite_connection: Constructible::default(), mm_version: "".into(), datetime: "".into(), mm_init_ctx: Mutex::new(None), @@ -191,8 +197,6 @@ impl MmCtx { #[cfg(target_arch = "wasm32")] db_namespace: DbNamespaceId::Main, nft_ctx: Mutex::new(None), - #[cfg(not(target_arch = "wasm32"))] - async_sqlite_connection: Constructible::default(), } } @@ -364,12 +368,26 @@ impl MmCtx { Ok(()) } + #[cfg(not(target_arch = "wasm32"))] + pub fn init_sqlite_connection_v2(&self, db_id: Option<&str>) -> Result<(), String> { + let sqlite_file_path = self.dbdir(db_id).join("MM2.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + + let connection = try_s!(Connection::open(sqlite_file_path)); + let mut store = HashMap::new(); + store.insert(self.rmd160_hex(), Arc::new(Mutex::new(connection))); + try_s!(self.sqlite_connection_v2.pin(Arc::new(Mutex::new(store)))); + + Ok(()) + } + #[cfg(not(target_arch = "wasm32"))] pub fn init_shared_sqlite_conn(&self) -> Result<(), String> { let sqlite_file_path = self.shared_dbdir().join("MM2-shared.db"); log_sqlite_file_open_attempt(&sqlite_file_path); let connection = try_s!(Connection::open(sqlite_file_path)); try_s!(self.shared_sqlite_conn.pin(Arc::new(Mutex::new(connection)))); + Ok(()) } @@ -391,6 +409,29 @@ impl MmCtx { self.sqlite_connection.as_option().map(|conn| conn.lock().unwrap()) } + #[cfg(not(target_arch = "wasm32"))] + pub fn sqlite_conn_opt_v2(&self, db_id: Option<&str>) -> Option { + if let Some(connections) = self.sqlite_connection_v2.as_option() { + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex().clone()); + let connections = connections.lock().unwrap(); + if let Some(connection) = connections.get(&db_id) { + return Some(connection.clone()); + } else { + let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + + let connection = Arc::new(Mutex::new( + Connection::open(sqlite_file_path).expect("failed to open db"), + )); + let mut store = HashMap::new(); + store.insert(self.rmd160_hex(), connection.clone()); + return Some(connection); + } + }; + + None + } + #[cfg(not(target_arch = "wasm32"))] pub fn sqlite_connection(&self) -> MutexGuard { self.sqlite_connection @@ -399,6 +440,50 @@ impl MmCtx { .unwrap() } + #[cfg(not(target_arch = "wasm32"))] + pub fn sqlite_connection_v2(&self, db_id: Option<&str>) -> SyncSqliteConnectionArc { + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex().clone()); + let connections = self + .sqlite_connection_v2 + .or(&|| panic!("sqlite_connection is not initialized")) + .lock() + .unwrap(); + if let Some(connection) = connections.get(&db_id) { + return connection.clone(); + } else { + let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + + let connection = Arc::new(Mutex::new( + Connection::open(sqlite_file_path).expect("failed to open db"), + )); + let mut store = HashMap::new(); + store.insert(self.rmd160_hex(), connection.clone()); + return connection; + } + } + + #[cfg(not(target_arch = "wasm32"))] + pub fn 
sqlite_connection_res_v2(&self, db_id: Option<&str>) -> Result { + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); + let connections = self + .sqlite_connection_v2 + .ok_or("sqlite_connection is not initialized".to_string())? + .lock() + .unwrap(); + if let Some(connection) = connections.get(&db_id) { + Ok(connection.clone()) + } else { + let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); + log_sqlite_file_open_attempt(&sqlite_file_path); + + let connection = Arc::new(Mutex::new(try_s!(Connection::open(sqlite_file_path)))); + let mut store = HashMap::new(); + store.insert(self.rmd160_hex(), connection.clone()); + Ok(connection) + } + } + #[cfg(not(target_arch = "wasm32"))] pub fn shared_sqlite_conn(&self) -> MutexGuard { self.shared_sqlite_conn diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 5b53abb807..ac80cc831c 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -470,6 +470,8 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { fix_directories(&ctx, Some(db_id))?; ctx.init_sqlite_connection(Some(db_id)) .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + ctx.init_sqlite_connection_v2(Some(db_id)) + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.init_shared_sqlite_conn() .map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.init_async_sqlite_connection(Some(db_id)) From ea778d2a6f70f85f2281b382b09333e63017c5da Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 22:06:15 +0100 Subject: [PATCH 051/186] WIP utxo_block_header storage refacting --- mm2src/coins/utxo/utxo_block_header_storage/mod.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 232d59df7d..48ad580942 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -60,14 +60,19 @@ impl BlockHeaderStorage { pub(crate) fn new_from_ctx( ctx: MmArc, ticker: String, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> Result { + use core::panicking::panic; use db_common::sqlite::rusqlite::Connection; use std::sync::{Arc, Mutex}; + let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| ctx.rmd160_hex()); let conn = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - let conn = ctx.sqlite_connection.clone_or(conn); + let mut connections = HashMap::new(); + connecions.insert(db_id, conn); + let conn = ctx.sqlite_connection_v2.clone_or(Arc::new(Mutex::new(connections))); + let conn = ctx.sqlite_connection_res_v2(db_id).unwrap(); Ok(BlockHeaderStorage { inner: Box::new(SqliteBlockHeadersStorage { ticker, conn }), }) From 8fa9cec03510dae1ab75c2d0a5321a567402946c Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 22:31:57 +0100 Subject: [PATCH 052/186] WIP utxo_block_header storage refacting test fn --- mm2src/coins/utxo/utxo_block_header_storage/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 48ad580942..f6f8fe7a90 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -62,17 +62,15 @@ impl BlockHeaderStorage { ticker: String, db_id: Option<&str>, ) -> Result { - use core::panicking::panic; use db_common::sqlite::rusqlite::Connection; use 
std::sync::{Arc, Mutex}; let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| ctx.rmd160_hex()); let conn = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let mut connections = HashMap::new(); - connecions.insert(db_id, conn); - let conn = ctx.sqlite_connection_v2.clone_or(Arc::new(Mutex::new(connections))); + connections.insert(db_id.to_owned(), conn.clone()); + let _ = ctx.sqlite_connection_v2.clone_or(Arc::new(Mutex::new(connections))); - let conn = ctx.sqlite_connection_res_v2(db_id).unwrap(); Ok(BlockHeaderStorage { inner: Box::new(SqliteBlockHeadersStorage { ticker, conn }), }) From 86649f5734608af720bac51b51079c6e955c003f Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Apr 2024 23:28:20 +0100 Subject: [PATCH 053/186] WIP simplify new_from_ctx test fn --- mm2src/coins/utxo/utxo_block_header_storage/mod.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index f6f8fe7a90..eba710fe73 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -60,16 +60,12 @@ impl BlockHeaderStorage { pub(crate) fn new_from_ctx( ctx: MmArc, ticker: String, - db_id: Option<&str>, + _db_id: Option<&str>, ) -> Result { use db_common::sqlite::rusqlite::Connection; use std::sync::{Arc, Mutex}; - let db_id = db_id.map(|e| e.to_string()).unwrap_or_else(|| ctx.rmd160_hex()); let conn = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - let mut connections = HashMap::new(); - connections.insert(db_id.to_owned(), conn.clone()); - let _ = ctx.sqlite_connection_v2.clone_or(Arc::new(Mutex::new(connections))); Ok(BlockHeaderStorage { inner: Box::new(SqliteBlockHeadersStorage { ticker, conn }), From 06998439a11e78703bf6238006cfbbda75430a3c Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 20 Apr 2024 00:54:36 +0100 Subject: [PATCH 054/186] WIP sqlite_connection_v2 --- .../utxo/utxo_block_header_storage/mod.rs | 8 +- mm2src/mm2_core/src/mm_ctx.rs | 54 ++-- mm2src/mm2_main/src/database.rs | 41 ++- mm2src/mm2_main/src/database/my_orders.rs | 27 +- mm2src/mm2_main/src/database/my_swaps.rs | 12 +- mm2src/mm2_main/src/database/stats_nodes.rs | 25 +- mm2src/mm2_main/src/lp_ordermatch.rs | 295 +++++++++--------- .../src/lp_ordermatch/my_orders_storage.rs | 63 ++-- mm2src/mm2_main/src/lp_stats.rs | 52 +-- mm2src/mm2_main/src/lp_swap.rs | 150 +++++---- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 199 ++++++------ .../mm2_main/src/lp_swap/my_swaps_storage.rs | 48 +-- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 48 +-- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 28 +- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 259 ++++++++------- 15 files changed, 698 insertions(+), 611 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index eba710fe73..cb32984153 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -1,9 +1,11 @@ -#[cfg(not(target_arch = "wasm32"))] mod sql_block_header_storage; +#[cfg(not(target_arch = "wasm32"))] +mod sql_block_header_storage; #[cfg(not(target_arch = "wasm32"))] pub use sql_block_header_storage::SqliteBlockHeadersStorage; -#[cfg(target_arch = "wasm32")] mod wasm; +#[cfg(target_arch = "wasm32")] +mod wasm; #[cfg(target_arch = "wasm32")] pub use wasm::IDBBlockHeadersStorage; @@ -58,7 +60,7 @@ impl BlockHeaderStorage { 
#[cfg(all(test, not(target_arch = "wasm32")))] pub(crate) fn new_from_ctx( - ctx: MmArc, + _ctx: MmArc, ticker: String, _db_id: Option<&str>, ) -> Result { diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 9d27544261..101c530db0 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -45,6 +45,7 @@ pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; pub type AsyncSqliteConnectionArc = Arc>; #[cfg(not(target_arch = "wasm32"))] pub type SyncSqliteConnectionArc = Arc>; + /// MarketMaker state, shared between the various MarketMaker threads. /// /// Every MarketMaker has one and only one instance of `MmCtx`. @@ -230,7 +231,7 @@ impl MmCtx { rpcport ) })? - }, + } None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -245,7 +246,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -265,7 +266,7 @@ impl MmCtx { return ERR!("IP address {} must be specified", ip); } Ok(()) - }, + } Ok(ServerName::DnsName(_)) => Ok(()), // NOTE: We need to have this wild card since `ServerName` is a non_exhaustive enum. Ok(_) => ERR!("Only IpAddress and DnsName are allowed in `alt_names`"), @@ -412,10 +413,10 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub fn sqlite_conn_opt_v2(&self, db_id: Option<&str>) -> Option { if let Some(connections) = self.sqlite_connection_v2.as_option() { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex().clone()); + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); let connections = connections.lock().unwrap(); - if let Some(connection) = connections.get(&db_id) { - return Some(connection.clone()); + return if let Some(connection) = connections.get(&db_id) { + Some(connection.clone()) } else { let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); log_sqlite_file_open_attempt(&sqlite_file_path); @@ -425,8 +426,9 @@ impl MmCtx { )); let mut store = HashMap::new(); store.insert(self.rmd160_hex(), connection.clone()); - return Some(connection); - } + drop(connections); + Some(connection) + }; }; None @@ -442,14 +444,14 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub fn sqlite_connection_v2(&self, db_id: Option<&str>) -> SyncSqliteConnectionArc { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex().clone()); + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); let connections = self .sqlite_connection_v2 .or(&|| panic!("sqlite_connection is not initialized")) .lock() .unwrap(); - if let Some(connection) = connections.get(&db_id) { - return connection.clone(); + return if let Some(connection) = connections.get(&db_id) { + connection.clone() } else { let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); log_sqlite_file_open_attempt(&sqlite_file_path); @@ -459,8 +461,10 @@ impl MmCtx { )); let mut store = HashMap::new(); store.insert(self.rmd160_hex(), connection.clone()); - return connection; - } + + drop(connections); + connection + }; } #[cfg(not(target_arch = "wasm32"))] @@ -480,6 +484,8 @@ impl MmCtx { let connection = Arc::new(Mutex::new(try_s!(Connection::open(sqlite_file_path)))); let mut store = HashMap::new(); store.insert(self.rmd160_hex(), connection.clone()); + + drop(connections); Ok(connection) } } @@ -621,7 +627,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. 
Stop the loop"); break; - }, + } } } }; @@ -655,7 +661,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - }, + } } } } @@ -752,8 +758,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -761,8 +767,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -776,9 +782,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> -where - C: FnOnce() -> Result, - T: 'static + Send + Sync, + where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -871,9 +877,9 @@ pub fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - }, + } Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - }, + } } } diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 0ea9d362bb..19184fa1d4 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -2,9 +2,12 @@ /// #[path = "database/my_orders.rs"] pub mod my_orders; -#[path = "database/my_swaps.rs"] pub mod my_swaps; -#[path = "database/stats_nodes.rs"] pub mod stats_nodes; -#[path = "database/stats_swaps.rs"] pub mod stats_swaps; +#[path = "database/my_swaps.rs"] +pub mod my_swaps; +#[path = "database/stats_nodes.rs"] +pub mod stats_nodes; +#[path = "database/stats_swaps.rs"] +pub mod stats_swaps; use crate::CREATE_MY_SWAPS_TABLE; use coins::find_unique_account_ids_any; @@ -18,14 +21,15 @@ use stats_swaps::create_and_fill_stats_swaps_from_json_statements; const SELECT_MIGRATION: &str = "SELECT * FROM migration ORDER BY current_migration DESC LIMIT 1;"; -fn get_current_migration(ctx: &MmArc) -> SqlResult { - let conn = ctx.sqlite_connection(); +fn get_current_migration(ctx: &MmArc, db_id: Option<&str>) -> SqlResult { + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.query_row(SELECT_MIGRATION, [], |row| row.get(0)) } pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> { info!("Checking the current SQLite migration"); - match get_current_migration(ctx) { + match get_current_migration(ctx, db_id) { Ok(current_migration) => { if current_migration >= 1 { info!( @@ -35,11 +39,11 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes migrate_sqlite_database(ctx, current_migration).await?; return Ok(()); } - }, + } Err(e) => { debug!("Error '{}' on getting current migration. 
The database is either empty or corrupted, trying to clean it first", e); - clean_db(ctx); - }, + clean_db(ctx, db_id); + } }; info!("Trying to initialize the SQLite database"); @@ -50,21 +54,23 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes Ok(()) } -fn init_db(ctx: &MmArc, _db_id: Option<&str>) -> SqlResult<()> { - let conn = ctx.sqlite_connection(); +fn init_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> { + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); run_optimization_pragmas(&conn)?; let init_batch = concat!( - "BEGIN; + "BEGIN; CREATE TABLE IF NOT EXISTS migration (current_migration INTEGER NOT_NULL UNIQUE); INSERT INTO migration (current_migration) VALUES (1);", - CREATE_MY_SWAPS_TABLE!(), - "COMMIT;" + CREATE_MY_SWAPS_TABLE!(), + "COMMIT;" ); conn.execute_batch(init_batch) } -fn clean_db(ctx: &MmArc) { - let conn = ctx.sqlite_connection(); +fn clean_db(ctx: &MmArc, db_id: Option<&str>) { + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); if let Err(e) = conn.execute_batch( "DROP TABLE migration; DROP TABLE my_swaps;", @@ -149,7 +155,8 @@ pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64) -> Sql while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, // so we can't create a transaction outside of this loop. - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(Some(&db_id)); + let conn = conn.lock().unwrap(); let transaction = conn.unchecked_transaction()?; for (statement, params) in statements_with_params { debug!("Executing SQL statement {statement:?} with params {params:?} for db_id: {db_id}"); diff --git a/mm2src/mm2_main/src/database/my_orders.rs b/mm2src/mm2_main/src/database/my_orders.rs index 898d1f1620..b8f388fc3a 100644 --- a/mm2src/mm2_main/src/database/my_orders.rs +++ b/mm2src/mm2_main/src/database/my_orders.rs @@ -41,7 +41,7 @@ const UPDATE_ORDER_STATUS: &str = "UPDATE my_orders SET last_updated = ?2, statu const SELECT_STATUS_BY_UUID: &str = "SELECT status FROM my_orders WHERE uuid = ?1"; -pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlResult<()> { +pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Option<&str>) -> SqlResult<()> { debug!("Inserting new order {} to the SQLite database", uuid); let params = vec![ uuid.to_string(), @@ -56,12 +56,13 @@ pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlRes 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) } -pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder) -> SqlResult<()> { +pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder, db_id: Option<&str>) -> SqlResult<()> { debug!("Inserting new order {} to the SQLite database", uuid); let price = order.request.rel_amount.to_decimal() / order.request.base_amount.to_decimal(); let initial_action = match order.request.action { @@ -81,12 +82,13 @@ pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder) -> SqlRes 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); 
conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) } -pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlResult<()> { +pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Option<&str>) -> SqlResult<()> { debug!("Updating order {} in the SQLite database", uuid); let params = vec![ uuid.to_string(), @@ -95,12 +97,13 @@ pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlRes order.updated_at.unwrap_or(0).to_string(), "Updated".to_string(), ]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(UPDATE_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) } -pub fn update_was_taker(ctx: &MmArc, uuid: Uuid) -> SqlResult<()> { +pub fn update_was_taker(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> SqlResult<()> { debug!("Updating order {} in the SQLite database", uuid); let params = vec![ uuid.to_string(), @@ -108,15 +111,17 @@ pub fn update_was_taker(ctx: &MmArc, uuid: Uuid) -> SqlResult<()> { now_ms().to_string(), 1.to_string(), ]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(UPDATE_WAS_TAKER, params_from_iter(params.iter())) .map(|_| ()) } -pub fn update_order_status(ctx: &MmArc, uuid: Uuid, status: String) -> SqlResult<()> { +pub fn update_order_status(ctx: &MmArc, uuid: Uuid, status: String, db_id: Option<&str>) -> SqlResult<()> { debug!("Updating order {} in the SQLite database", uuid); let params = vec![uuid.to_string(), now_ms().to_string(), status]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(UPDATE_ORDER_STATUS, params_from_iter(params.iter())) .map(|_| ()) } @@ -250,7 +255,7 @@ pub fn select_orders_by_filter( query_builder.limit(paging.limit); query_builder.offset(offset); offset - }, + } None => 0, }; diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index fcae009af4..5bc998e391 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -70,10 +70,11 @@ pub fn insert_new_swap( uuid: &str, started_at: &str, swap_type: u8, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> SqlResult<()> { debug!("Inserting new swap {} to the SQLite database", uuid); - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); let params = [my_coin, other_coin, uuid, started_at, &swap_type.to_string()]; conn.execute(INSERT_MY_SWAP, params).map(|_| ()) } @@ -122,8 +123,9 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( :other_p2p_pub );"#; -pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)], _db_id: Option<&str>) -> SqlResult<()> { - let conn = ctx.sqlite_connection(); +pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)], db_id: Option<&str>) -> SqlResult<()> { + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_SWAP_V2, params).map(|_| ()) } @@ -240,7 +242,7 @@ pub fn select_uuids_by_my_swaps_filter( query_builder.limit(paging.limit); query_builder.offset(offset); offset - }, + } None => 0, }; diff --git a/mm2src/mm2_main/src/database/stats_nodes.rs b/mm2src/mm2_main/src/database/stats_nodes.rs index 7a6330c24a..0319673d4b 100644 --- a/mm2src/mm2_main/src/database/stats_nodes.rs +++ 
b/mm2src/mm2_main/src/database/stats_nodes.rs @@ -30,26 +30,29 @@ const SELECT_PEERS_NAMES: &str = "SELECT peer_id, name FROM nodes"; const INSERT_STAT: &str = "INSERT INTO stats_nodes (name, version, timestamp, error) VALUES (?1, ?2, ?3, ?4)"; -pub fn insert_node_info(ctx: &MmArc, node_info: &NodeInfo) -> SqlResult<()> { +pub fn insert_node_info(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str>) -> SqlResult<()> { debug!("Inserting info about node {} to the SQLite database", node_info.name); let params = vec![ node_info.name.clone(), node_info.address.clone(), node_info.peer_id.clone(), ]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(INSERT_NODE, params_from_iter(params.iter())).map(|_| ()) } -pub fn delete_node_info(ctx: &MmArc, name: String) -> SqlResult<()> { +pub fn delete_node_info(ctx: &MmArc, name: String, db_id: Option<&str>) -> SqlResult<()> { debug!("Deleting info about node {} from the SQLite database", name); let params = vec![name]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(DELETE_NODE, params_from_iter(params.iter())).map(|_| ()) } -pub fn select_peers_addresses(ctx: &MmArc) -> SqlResult, SqlError> { - let conn = ctx.sqlite_connection(); +pub fn select_peers_addresses(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); let mut stmt = conn.prepare(SELECT_PEERS_ADDRESSES)?; let peers_addresses = stmt .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? @@ -58,8 +61,9 @@ pub fn select_peers_addresses(ctx: &MmArc) -> SqlResult, S Ok(peers_addresses) } -pub fn select_peers_names(ctx: &MmArc) -> SqlResult, SqlError> { - let conn = ctx.sqlite_connection(); +pub fn select_peers_names(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); let mut stmt = conn.prepare(SELECT_PEERS_NAMES)?; let peers_names = stmt .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? 
@@ -68,7 +72,7 @@ pub fn select_peers_names(ctx: &MmArc) -> SqlResult, Sql peers_names } -pub fn insert_node_version_stat(ctx: &MmArc, node_version_stat: NodeVersionStat) -> SqlResult<()> { +pub fn insert_node_version_stat(ctx: &MmArc, node_version_stat: NodeVersionStat, db_id: Option<&str>) -> SqlResult<()> { debug!( "Inserting new version stat for node {} to the SQLite database", node_version_stat.name @@ -79,6 +83,7 @@ pub fn insert_node_version_stat(ctx: &MmArc, node_version_stat: NodeVersionStat) node_version_stat.timestamp.to_string(), node_version_stat.error.unwrap_or_default(), ]; - let conn = ctx.sqlite_connection(); + let conn = ctx.sqlite_connection_v2(db_id); + let conn = conn.lock().unwrap(); conn.execute(INSERT_STAT, params_from_iter(params.iter())).map(|_| ()) } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 7136643976..6756299ed7 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -49,7 +49,8 @@ use mm2_number::{BigDecimal, BigRational, MmNumber, MmNumberMultiRepr}; use mm2_rpc::data::legacy::{MatchBy, Mm2RpcResult, OrderConfirmationsSettings, OrderType, RpcOrderbookEntry, SellBuyRequest, SellBuyResponse, TakerAction, TakerRequestForRpc}; use mm2_state_machine::prelude::*; -#[cfg(test)] use mocktopus::macros::*; +#[cfg(test)] +use mocktopus::macros::*; use my_orders_storage::{delete_my_maker_order, delete_my_taker_order, save_maker_order_on_update, save_my_new_maker_order, save_my_new_taker_order, MyActiveOrders, MyOrdersFilteringHistory, MyOrdersHistory, MyOrdersStorage}; @@ -95,18 +96,24 @@ cfg_wasm32! { pub type OrdermatchDbLocked<'a> = DbLocked<'a, OrdermatchDb>; } -#[path = "lp_ordermatch/best_orders.rs"] mod best_orders; -#[path = "lp_ordermatch/lp_bot.rs"] mod lp_bot; +#[path = "lp_ordermatch/best_orders.rs"] +mod best_orders; +#[path = "lp_ordermatch/lp_bot.rs"] +mod lp_bot; + pub use lp_bot::{start_simple_market_maker_bot, stop_simple_market_maker_bot, StartSimpleMakerBotRequest, TradingBotEvent}; #[path = "lp_ordermatch/my_orders_storage.rs"] mod my_orders_storage; -#[path = "lp_ordermatch/new_protocol.rs"] mod new_protocol; +#[path = "lp_ordermatch/new_protocol.rs"] +mod new_protocol; #[path = "lp_ordermatch/order_requests_tracker.rs"] mod order_requests_tracker; -#[path = "lp_ordermatch/orderbook_depth.rs"] mod orderbook_depth; -#[path = "lp_ordermatch/orderbook_rpc.rs"] mod orderbook_rpc; +#[path = "lp_ordermatch/orderbook_depth.rs"] +mod orderbook_depth; +#[path = "lp_ordermatch/orderbook_rpc.rs"] +mod orderbook_rpc; #[cfg(all(test, not(target_arch = "wasm32")))] #[path = "ordermatch_tests.rs"] pub mod ordermatch_tests; @@ -152,8 +159,8 @@ pub enum OrderbookP2PHandlerError { P2PRequestError(String), #[display( - fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", - _0 + fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", + _0 )] OrderNotFound(Uuid), @@ -271,7 +278,7 @@ fn process_trie_delta( )), None => { orderbook.remove_order_trie_update(uuid); - }, + } } } @@ -310,13 +317,13 @@ async fn process_orders_keep_alive( P2PRequest::Ordermatch(req), propagated_from_peer.clone(), ) - .await? - .ok_or_else(|| { - MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( - "No response was received from peer {} for SyncPubkeyOrderbookState request!", - propagated_from_peer - ))) - })?; + .await? 
+ .ok_or_else(|| { + MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( + "No response was received from peer {} for SyncPubkeyOrderbookState request!", + propagated_from_peer + ))) + })?; let mut orderbook = ordermatch_ctx.orderbook.lock(); for (pair, diff) in response.pair_orders_diff { @@ -392,13 +399,13 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul let response = try_s!(request_any_relay::(ctx.clone(), P2PRequest::Ordermatch(request)).await); let (pubkey_orders, protocol_infos, conf_infos) = match response { Some(( - GetOrderbookRes { - pubkey_orders, - protocol_infos, - conf_infos, - }, - _peer_id, - )) => (pubkey_orders, protocol_infos, conf_infos), + GetOrderbookRes { + pubkey_orders, + protocol_infos, + conf_infos, + }, + _peer_id, + )) => (pubkey_orders, protocol_infos, conf_infos), None => return Ok(()), }; @@ -414,7 +421,7 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul Err(e) => { warn!("Error {} decoding pubkey {}", e, pubkey); continue; - }, + } }; if is_my_order(&pubkey, &my_pubsecp, &orderbook.my_p2p_pubkeys) { @@ -480,9 +487,9 @@ fn delete_my_order(ctx: &MmArc, uuid: Uuid, p2p_privkey: Option(ctx: &MmArc, err_construct: F) -> MmResult, E> -where - E: NotMmError, - F: Fn(String) -> E, + where + E: NotMmError, + F: Fn(String) -> E, { match CryptoCtx::from_ctx(ctx).split_mm() { Ok(crypto_ctx) => Ok(Some(CryptoCtx::mm2_internal_pubkey_hex(crypto_ctx.as_ref()))), @@ -555,39 +562,39 @@ pub async fn process_msg(ctx: MmArc, from_peer: String, msg: &[u8], i_am_relay: let order: OrderbookItem = (created_msg, hex::encode(pubkey.to_bytes().as_slice())).into(); insert_or_update_order(&ctx, order); Ok(()) - }, + } new_protocol::OrdermatchMessage::PubkeyKeepAlive(keep_alive) => { process_orders_keep_alive(ctx, from_peer, pubkey.to_hex(), keep_alive, i_am_relay).await - }, + } new_protocol::OrdermatchMessage::TakerRequest(taker_request) => { let msg = TakerRequest::from_new_proto_and_pubkey(taker_request, pubkey.unprefixed().into()); process_taker_request(ctx, pubkey.unprefixed().into(), msg).await; Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerReserved(maker_reserved) => { let msg = MakerReserved::from_new_proto_and_pubkey(maker_reserved, pubkey.unprefixed().into()); // spawn because process_maker_reserved may take significant time to run let spawner = ctx.spawner(); spawner.spawn(process_maker_reserved(ctx, pubkey.unprefixed().into(), msg)); Ok(()) - }, + } new_protocol::OrdermatchMessage::TakerConnect(taker_connect) => { process_taker_connect(ctx, pubkey, taker_connect.into()).await; Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerConnected(maker_connected) => { process_maker_connected(ctx, pubkey, maker_connected.into()).await; Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerOrderCancelled(cancelled_msg) => { delete_order(&ctx, &pubkey.to_hex(), cancelled_msg.uuid.into()); Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerOrderUpdated(updated_msg) => { process_maker_order_updated(ctx, pubkey.to_hex(), updated_msg) - }, + } } - }, + } Err(e) => MmError::err(OrderbookP2PHandlerError::DecodeError(e.to_string())), } } @@ -629,8 +636,8 @@ impl From for TryFromBytesError { trait TryFromBytes { fn try_from_bytes(bytes: Vec) -> Result - where - Self: Sized; + where + Self: Sized; } impl TryFromBytes for String { @@ -664,13 +671,13 @@ pub fn process_peer_request(ctx: MmArc, request: OrdermatchRequest) -> Result { let response = process_sync_pubkey_orderbook_state(ctx, pubkey, 
trie_roots); response.map(|res| res.map(|r| encode_message(&r).expect("Serialization failed"))) - }, + } OrdermatchRequest::BestOrders { coin, action, volume } => { best_orders::process_best_orders_p2p_request(ctx, coin, action, volume) - }, + } OrdermatchRequest::BestOrdersByNumber { coin, action, number } => { best_orders::process_best_orders_p2p_request_by_number(ctx, coin, action, number) - }, + } OrdermatchRequest::OrderbookDepth { pairs } => orderbook_depth::process_orderbook_depth_p2p_request(ctx, pairs), } } @@ -736,7 +743,7 @@ fn get_pubkeys_orders(orderbook: &Orderbook, base: String, rel: String) -> GetPu uuid ); continue; - }, + } }; let uuids = uuids_by_pubkey.entry(order.pubkey.clone()).or_insert_with(Vec::new); protocol_infos.insert(order.uuid, order.base_rel_proto_info()); @@ -808,12 +815,12 @@ impl DeltaOrFullTrie { .map(|(key, value)| (key, value.map(From::from))) .collect(); DeltaOrFullTrie::Delta(new_map) - }, + } DeltaOrFullTrie::FullTrie(trie) => { trie.iter().for_each(|(key, val)| on_each(key, Some(val))); let new_trie = trie.into_iter().map(|(key, value)| (key, value.into())).collect(); DeltaOrFullTrie::FullTrie(new_trie) - }, + } } } } @@ -844,8 +851,8 @@ fn get_full_trie( db: &MemoryDB, getter: impl Fn(&Key) -> Option, ) -> Result, TrieDiffHistoryError> -where - Key: Clone + Eq + std::hash::Hash + TryFromBytes, + where + Key: Clone + Eq + std::hash::Hash + TryFromBytes, { let trie = TrieDB::::new(db, trie_root)?; let trie: Result, TrieDiffHistoryError> = trie @@ -924,10 +931,10 @@ fn process_sync_pubkey_orderbook_state( let delta_result = match pubkey_state.order_pairs_trie_state_history.get(&pair) { Some(history) => { DeltaOrFullTrie::from_history(history, root, *actual_pair_root, &orderbook.memory_db, order_getter) - }, + } None => { get_full_trie(actual_pair_root, &orderbook.memory_db, order_getter).map(DeltaOrFullTrie::FullTrie) - }, + } }; let delta = try_s!(delta_result); @@ -950,11 +957,11 @@ fn process_sync_pubkey_orderbook_state( if let Some(ref info) = o.conf_settings { conf_infos.insert(o.uuid, info.clone()); } - }, + } None => { protocol_infos.remove(uuid); conf_infos.remove(uuid); - }, + } }); (pair, new_trie) }) @@ -1004,10 +1011,10 @@ pub fn parse_orderbook_pair_from_topic(topic: &str) -> Option<(&str, &str)> { } else { None } - }, + } None => None, } - }, + } None => None, }, _ => None, @@ -1050,7 +1057,7 @@ fn maker_order_created_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_created' message: {}", e); return; - }, + } }; let item: OrderbookItem = (message, hex::encode(key_pair.public_slice())).into(); insert_or_update_my_order(&ctx, item, order); @@ -1081,7 +1088,7 @@ fn maker_order_updated_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_updated' message: {}", e); return; - }, + } }; process_my_maker_order_updated(&ctx, &message); broadcast_p2p_msg(&ctx, topic, encoded_msg, peer_id); @@ -1128,7 +1135,7 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { Err(e) => { log::warn!("Couldn't handle the 'balance_updated' event: {}", e); return; - }, + } }; let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); @@ -1151,9 +1158,9 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { order.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); continue; } } @@ -1598,12 +1605,12 @@ impl TakerOrder { if !uuids.contains(&reserved.maker_order_uuid) { return MatchReservedResult::NotMatched; 
} - }, + } MatchBy::Pubkeys(pubkeys) => { if !pubkeys.contains(&reserved.sender_pubkey) { return MatchReservedResult::NotMatched; } - }, + } } let my_base_amount = self.request.get_base_amount(); @@ -1621,18 +1628,18 @@ impl TakerOrder { } else { MatchReservedResult::NotMatched } - }, + } TakerAction::Sell => { let match_ticker = (self.request.base == reserved.rel || self.base_orderbook_ticker.as_ref() == Some(&reserved.rel)) && (self.request.rel == reserved.base - || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); + || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); if match_ticker && my_base_amount == other_rel_amount && my_rel_amount <= other_base_amount { MatchReservedResult::Matched } else { MatchReservedResult::NotMatched } - }, + } } } @@ -2040,7 +2047,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - }, + } TakerAction::Sell => { let ticker_match = (self.base == taker.rel || self.base_orderbook_ticker.as_ref() == Some(&taker.rel)) && (self.rel == taker.base || self.rel_orderbook_ticker.as_ref() == Some(&taker.base)); @@ -2059,7 +2066,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - }, + } } } @@ -2140,7 +2147,7 @@ impl From for MakerOrder { rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, } - }, + } } } } @@ -2338,7 +2345,7 @@ fn broadcast_ordermatch_message( Err(e) => { error!("Failed to encode and sign ordermatch message: {}", e); return; - }, + } }; broadcast_p2p_msg(ctx, topic, encoded_msg, peer_id); } @@ -2386,10 +2393,10 @@ impl TrieDiffHistory { while let Some(next_diff) = self.inner.remove(diff.next_root) { diff = next_diff; } - }, + } None => { self.inner.insert(insert_at, diff); - }, + } }; } @@ -2450,7 +2457,7 @@ fn pubkey_state_mut<'a>( RawEntryMut::Vacant(e) => { let state = OrderbookPubkeyState::with_history_timeout(Duration::new(TRIE_STATE_HISTORY_TIMEOUT, 0)); e.insert(from_pubkey.to_string(), state).1 - }, + } } } @@ -2542,7 +2549,7 @@ impl Orderbook { Err(e) => { error!("Error getting {} trie with root {:?}", e, prev_root); return; - }, + } }; let order_bytes = order.trie_state_bytes(); if let Err(e) = pair_trie.insert(order.uuid.as_bytes(), &order_bytes) { @@ -2651,7 +2658,7 @@ impl Orderbook { Err(_) => { error!("Failed to get existing trie with root {:?}", pair_state); return Some(order); - }, + } }; if pubkey_state.order_pairs_trie_state_history.get(&alb_ordered).is_some() { @@ -2901,11 +2908,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.rel); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", maker_order.rel, e); return; - }, + } }; let maker_coin = match lp_coinfind(&ctx, &maker_order.base).await { @@ -2913,11 +2920,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.base); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", maker_order.base, e); return; - }, + } }; let alice = bits256::from(maker_match.request.sender_pubkey.0); let maker_amount = maker_match.reserved.get_base_amount().clone(); @@ -2947,7 +2954,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO my_conf_settings, other_conf_settings, } - }, + } None => AtomicLocktimeVersion::V1, }; let lock_time = lp_atomic_locktime( @@ -2971,7 +2978,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Err(e) => { 
error!("Error {} on secret generation", e); return; - }, + } }; let account_db_id = maker_coin.account_db_id(); @@ -3009,7 +3016,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO .run(Box::new(maker_swap_v2::Initialize::default())) .await .error_log(); - }, + } _ => todo!("implement fallback to the old protocol here"), } } else { @@ -3022,7 +3029,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3062,11 +3069,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", taker_coin_ticker); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", taker_coin_ticker, e); return; - }, + } }; let maker_coin_ticker = taker_order.maker_coin_ticker(); @@ -3075,11 +3082,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", maker_coin_ticker); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", maker_coin_ticker, e); return; - }, + } }; // lp_connected_alice is called only from process_maker_connected, which returns if CryptoCtx is not initialized @@ -3109,7 +3116,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat my_conf_settings, other_conf_settings, } - }, + } None => AtomicLocktimeVersion::V1, }; let locktime = lp_atomic_locktime( @@ -3134,7 +3141,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Err(e) => { error!("Error {} on secret generation", e); return; - }, + } }; let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); match (maker_coin, taker_coin) { @@ -3170,12 +3177,12 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat .run(Box::new(taker_swap_v2::Initialize::default())) .await .error_log(); - }, + } _ => todo!("implement fallback to the old protocol here"), } } else { #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); if let Err(e) = insert_new_swap_to_db( ctx.clone(), @@ -3186,7 +3193,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3205,7 +3212,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat locktime, taker_order.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); run_taker_swap(RunTakerSwapInput::StartNew(taker_swap), ctx).await } @@ -3286,7 +3293,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - }, + } }; let max_vol = match calc_max_maker_vol(&ctx, &base, ¤t_balance, FeeApproxStage::OrderIssue).await { @@ -3295,7 +3302,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - }, + } }; if max_vol < order.available_amount() { order.max_base_vol = order.reserved_amount() + max_vol; @@ -3331,9 +3338,9 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { order.clone(), 
MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); } } } @@ -3368,7 +3375,8 @@ pub async fn clean_memory_loop(ctx_weak: MmWeak) { /// The function locks the [`OrdermatchContext::my_maker_orders`] and [`OrdermatchContext::my_taker_orders`] mutexes. async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let mut my_taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - let storage = MyOrdersStorage::new(ctx.clone()); + // TODO db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let mut my_actual_taker_orders = HashMap::with_capacity(my_taker_orders.len()); for (uuid, order) in my_taker_orders.drain() { @@ -3458,7 +3466,8 @@ async fn check_balance_for_maker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchC /// The function locks the [`OrdermatchContext::my_maker_orders`] mutex. async fn handle_timed_out_maker_matches(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let now = now_ms(); - let storage = MyOrdersStorage::new(ctx.clone()); + // TODO db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let my_maker_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); for (_, order) in my_maker_orders.iter() { @@ -3549,11 +3558,11 @@ async fn process_maker_reserved(ctx: MmArc, from_pubkey: H256Json, reserved_msg: if (my_order.match_reserved(&reserved_msg) == MatchReservedResult::Matched && my_order.matches.is_empty()) && base_coin.is_coin_protocol_supported(&reserved_msg.base_protocol_info, None, lock_time, false) && rel_coin.is_coin_protocol_supported( - &reserved_msg.rel_protocol_info, - Some(reserved_msg.rel_amount.clone()), - lock_time, - false, - ) + &reserved_msg.rel_protocol_info, + Some(reserved_msg.rel_amount.clone()), + lock_time, + false, + ) { let connect = TakerConnect { sender_pubkey: H256Json::from(our_public_id.bytes), @@ -3572,7 +3581,8 @@ async fn process_maker_reserved(ctx: MmArc, from_pubkey: H256Json, reserved_msg: my_order .matches .insert(taker_match.reserved.maker_order_uuid, taker_match); - MyOrdersStorage::new(ctx) + // TODO db_id + MyOrdersStorage::new(ctx, None) .update_active_taker_order(my_order) .await .error_log_with_msg("!update_active_taker_order"); @@ -3610,7 +3620,7 @@ async fn process_maker_connected(ctx: MmArc, from_pubkey: PublicKey, connected: connected.maker_order_uuid ); return; - }, + } }; if order_match.reserved.sender_pubkey != unprefixed_from.into() { @@ -3649,7 +3659,8 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: } let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); - let storage = MyOrdersStorage::new(ctx.clone()); + // TODO db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let mut my_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); let filtered = my_orders .iter_mut() @@ -3677,21 +3688,21 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: atomic_locktime_v, ) as f64 * rel_coin.maker_locktime_multiplier()) - .ceil() as u64; + .ceil() as u64; if !order.matches.contains_key(&taker_request.uuid) && base_coin.is_coin_protocol_supported( - taker_request.base_protocol_info_for_maker(), - Some(base_amount.clone()), - maker_lock_duration, - true, - ) + taker_request.base_protocol_info_for_maker(), + Some(base_amount.clone()), + maker_lock_duration, + true, + ) && rel_coin.is_coin_protocol_supported( - taker_request.rel_protocol_info_for_maker(), - None, - maker_lock_duration, - true, - ) + 
taker_request.rel_protocol_info_for_maker(), + None, + maker_lock_duration, + true, + ) { let reserved = MakerReserved { dest_pub_key: taker_request.sender_pubkey, @@ -3769,7 +3780,7 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg connect_msg.taker_order_uuid ); return; - }, + } }; if order_match.request.sender_pubkey != sender_unprefixed.into() { log::warn!("Connect message sender pubkey != request message sender pubkey"); @@ -3798,7 +3809,8 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg updated_msg.with_new_max_volume(my_order.available_amount().into()); maker_order_updated_p2p_notify(ctx.clone(), topic, updated_msg, my_order.p2p_keypair()); } - MyOrdersStorage::new(ctx) + // TODO db_id + MyOrdersStorage::new(ctx, None) .update_active_maker_order(&my_order) .await .error_log_with_msg("!update_active_maker_order"); @@ -4803,7 +4815,7 @@ pub async fn update_maker_order(ctx: &MmArc, req: MakerOrderUpdateReq) -> Result try_s!(validate_price(new_price.clone())); update_msg.with_new_price(new_price.clone().into()); new_price - }, + } None => order_before_update.price.clone(), }; @@ -4925,7 +4937,8 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St let req: OrderStatusReq = try_s!(json::from_value(req)); let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(&ctx)); - let storage = MyOrdersStorage::new(ctx.clone()); + // TODO db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let maybe_order_mutex = ordermatch_ctx.maker_orders_ctx.lock().get_order(&req.uuid).cloned(); if let Some(order_mutex) = maybe_order_mutex { @@ -5053,7 +5066,8 @@ pub struct FilteringOrder { /// Returns *all* uuids of swaps, which match the selected filter. pub async fn orders_history_by_filter(ctx: MmArc, req: Json) -> Result>, String> { - let storage = MyOrdersStorage::new(ctx.clone()); + // TODO db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let filter: MyOrdersFilter = try_s!(json::from_value(req)); let db_result = try_s!(storage.select_orders_by_filter(&filter, None).await); @@ -5075,7 +5089,7 @@ pub async fn orders_history_by_filter(ctx: MmArc, req: Json) -> Result Result (), } @@ -5229,7 +5243,7 @@ pub async fn cancel_order_rpc(ctx: MmArc, req: Json) -> Result> return Response::builder() .body(json::to_vec(&res).expect("Serialization failed")) .map_err(|e| ERRL!("{}", e)); - }, + } // error is returned Entry::Vacant(_) => (), } @@ -5390,7 +5404,8 @@ pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { let mut coins = HashSet::new(); let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(ctx)); - let storage = MyOrdersStorage::new(ctx.clone()); + // TODO db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); @@ -5508,7 +5523,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5528,7 +5543,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5548,7 +5563,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec match e.get() { OrderbookRequestingState::Requested => { // We are subscribed to the topic and the orderbook was requested already true - }, + } 
OrderbookRequestingState::NotRequested { subscribed_at } => { // We are subscribed to the topic. Also we didn't request the orderbook, // True if enough time has passed for the orderbook to fill by OrdermatchRequest::SyncPubkeyOrderbookState. *subscribed_at + ORDERBOOK_REQUESTING_TIMEOUT < current_timestamp - }, + } }, } }; @@ -5692,7 +5707,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - }, + } TakerAction::Buy => { let maker_coin_confs = if taker_settings.base_confs < maker_settings.base_confs { taker_settings.base_confs @@ -5710,7 +5725,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - }, + } }, None => ( maker_settings.base_confs, @@ -5810,12 +5825,12 @@ fn orderbook_address( coins::eth::addr_from_pubkey_str(pubkey) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - }, + } CoinProtocol::UTXO | CoinProtocol::QTUM | CoinProtocol::QRC20 { .. } | CoinProtocol::BCH { .. } => { coins::utxo::address_by_conf_and_pubkey_str(coin, conf, pubkey, addr_format) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - }, + } CoinProtocol::SLPTOKEN { platform, .. } => { let platform_conf = coin_conf(ctx, &platform); if platform_conf.is_null() { @@ -5829,12 +5844,12 @@ fn orderbook_address( .mm_err(|e| OrderbookAddrErr::AddrFromPubkeyError(e.to_string())), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(platform)), } - }, + } CoinProtocol::TENDERMINT(protocol) => Ok(coins::tendermint::account_id_from_pubkey_hex( &protocol.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), CoinProtocol::TENDERMINTTOKEN(proto) => { let platform_conf = coin_conf(ctx, &proto.platform); if platform_conf.is_null() { @@ -5847,17 +5862,17 @@ fn orderbook_address( &platform.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(format!( "Platform protocol {:?} is not TENDERMINT", platform_protocol ))), } - }, + } #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA | CoinProtocol::SPLTOKEN { .. } => { MmError::err(OrderbookAddrErr::CoinIsNotSupported(coin.to_owned())) - }, + } CoinProtocol::ZHTLC { .. 
} => Ok(OrderbookAddress::Shielded), #[cfg(not(target_arch = "wasm32"))] // Todo: Shielded address is used for lightning for now, the lightning node public key can be used for the orderbook entry pubkey diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 38a144952f..e568d7f99c 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -7,7 +7,8 @@ use derive_more::Display; use futures::{FutureExt, TryFutureExt}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; -#[cfg(test)] use mocktopus::macros::*; +#[cfg(test)] +use mocktopus::macros::*; use uuid::Uuid; pub type MyOrdersResult = Result>; @@ -36,7 +37,8 @@ pub enum MyOrdersError { } pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - let storage = MyOrdersStorage::new(ctx); + // TODO db_id shouldn't be None + let storage = MyOrdersStorage::new(ctx, None); storage .save_new_active_maker_order(order) .await @@ -49,7 +51,8 @@ pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrders } pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrdersResult<()> { - let storage = MyOrdersStorage::new(ctx); +// TODO db_id + let storage = MyOrdersStorage::new(ctx, None); storage .save_new_active_taker_order(order) .await @@ -62,7 +65,8 @@ pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrders } pub async fn save_maker_order_on_update(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - let storage = MyOrdersStorage::new(ctx); + // TODO db_id + let storage = MyOrdersStorage::new(ctx, None); storage.update_active_maker_order(order).await?; if order.save_in_history { @@ -77,7 +81,8 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa let uuid = order.request.uuid; let save_in_history = order.save_in_history; - let storage = MyOrdersStorage::new(ctx); + // TODO db_id + let storage = MyOrdersStorage::new(ctx, None); storage .delete_active_taker_order(uuid) .await @@ -92,7 +97,7 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa .await .error_log_with_msg("!save_order_in_history"); } - }, + } } if save_in_history { @@ -113,7 +118,8 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa let uuid = order_to_save.uuid; let save_in_history = order_to_save.save_in_history; - let storage = MyOrdersStorage::new(ctx); + // TODO db_id + let storage = MyOrdersStorage::new(ctx, None); if order_to_save.was_updated() { if let Ok(order_from_file) = storage.load_active_maker_order(order_to_save.uuid).await { order_to_save = order_from_file; @@ -222,7 +228,7 @@ mod native_impl { FsJsonError::Serializing(serializing) => MyOrdersError::ErrorSerializing(serializing.to_string()), FsJsonError::Deserializing(deserializing) => { MyOrdersError::ErrorDeserializing(deserializing.to_string()) - }, + } } } } @@ -230,10 +236,11 @@ mod native_impl { #[derive(Clone)] pub struct MyOrdersStorage { ctx: MmArc, + pub db_id: Option, } impl MyOrdersStorage { - pub fn new(ctx: MmArc) -> MyOrdersStorage { MyOrdersStorage { ctx } } + pub fn new(ctx: MmArc, db_id: Option<&str>) -> MyOrdersStorage { MyOrdersStorage { ctx, db_id: db_id.map(|e| e.to_string()) } } } #[async_trait] @@ -315,34 +322,38 @@ mod native_impl { filter: &MyOrdersFilter, paging_options: Option<&PagingOptions>, ) -> MyOrdersResult { - 
select_orders_by_filter(&self.ctx.sqlite_connection(), filter, paging_options) + let conn = self.ctx.sqlite_connection_v2(self.db_id.as_deref()); + let conn = conn.lock().unwrap(); + select_orders_by_filter(&conn, filter, paging_options) .map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) } async fn select_order_status(&self, uuid: Uuid) -> MyOrdersResult { - select_status_by_uuid(&self.ctx.sqlite_connection(), &uuid) + let conn = self.ctx.sqlite_connection_v2(self.db_id.as_deref()); + let conn = conn.lock().unwrap(); + select_status_by_uuid(&conn, &uuid) .map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) } async fn save_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - insert_maker_order(&self.ctx, order.uuid, order).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + insert_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn save_taker_order_in_filtering_history(&self, order: &TakerOrder) -> MyOrdersResult<()> { - insert_taker_order(&self.ctx, order.request.uuid, order) + insert_taker_order(&self.ctx, order.request.uuid, order, self.db_id.as_deref()) .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn update_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - update_maker_order(&self.ctx, order.uuid, order).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + update_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn update_order_status_in_filtering_history(&self, uuid: Uuid, status: String) -> MyOrdersResult<()> { - update_order_status(&self.ctx, uuid, status).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + update_order_status(&self.ctx, uuid, status, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn update_was_taker_in_filtering_history(&self, uuid: Uuid) -> MyOrdersResult<()> { - update_was_taker(&self.ctx, uuid).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + update_was_taker(&self.ctx, uuid, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } } } @@ -773,7 +784,7 @@ mod tests { #[wasm_bindgen_test] async fn test_delete_my_maker_order() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone()); + let storage = MyOrdersStorage::new(ctx.clone(), None); let maker1 = maker_order_for_test(); @@ -786,9 +797,9 @@ mod tests { maker1.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .unwrap(); + .compat() + .await + .unwrap(); let actual_active_maker_orders = storage .load_active_taker_orders() @@ -809,7 +820,7 @@ mod tests { #[wasm_bindgen_test] async fn test_delete_my_taker_order() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone()); + let storage = MyOrdersStorage::new(ctx.clone(), None); let taker1 = taker_order_for_test(); let taker2 = TakerOrder { @@ -867,7 +878,7 @@ mod tests { #[wasm_bindgen_test] async fn test_load_active_maker_taker_orders() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone()); + let storage = MyOrdersStorage::new(ctx.clone(), None); let maker1 = maker_order_for_test(); let mut maker2 = MakerOrder { @@ -914,7 +925,7 @@ mod tests { #[wasm_bindgen_test] async fn 
test_filtering_history() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone()); + let storage = MyOrdersStorage::new(ctx.clone(), None); let maker1 = maker_order_for_test(); let mut maker2 = MakerOrder { @@ -964,9 +975,9 @@ mod tests { maker_order_to_filtering_history_item(&maker2, "Updated".to_owned(), false).unwrap(), taker_order_to_filtering_history_item(&taker1, "MyCustomStatus".to_owned()).unwrap(), ] - .into_iter() - .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) - .collect(); + .into_iter() + .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) + .collect(); assert_eq!(actual_items, expected_items); diff --git a/mm2src/mm2_main/src/lp_stats.rs b/mm2src/mm2_main/src/lp_stats.rs index 8a064c1d70..44cfb0231d 100644 --- a/mm2src/mm2_main/src/lp_stats.rs +++ b/mm2src/mm2_main/src/lp_stats.rs @@ -89,24 +89,24 @@ pub struct NodeVersionStat { fn insert_node_info_to_db(_ctx: &MmArc, _node_info: &NodeInfo) -> Result<(), String> { Ok(()) } #[cfg(not(target_arch = "wasm32"))] -fn insert_node_info_to_db(ctx: &MmArc, node_info: &NodeInfo) -> Result<(), String> { - crate::mm2::database::stats_nodes::insert_node_info(ctx, node_info).map_err(|e| e.to_string()) +fn insert_node_info_to_db(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str>) -> Result<(), String> { + crate::mm2::database::stats_nodes::insert_node_info(ctx, node_info, db_id).map_err(|e| e.to_string()) } #[cfg(target_arch = "wasm32")] -fn insert_node_version_stat_to_db(_ctx: &MmArc, _node_version_stat: NodeVersionStat) -> Result<(), String> { Ok(()) } +fn insert_node_version_stat_to_db(_ctx: &MmArc, _node_version_stat: NodeVersionStat, _db_id: Option<&str>) -> Result<(), String> { Ok(()) } #[cfg(not(target_arch = "wasm32"))] -fn insert_node_version_stat_to_db(ctx: &MmArc, node_version_stat: NodeVersionStat) -> Result<(), String> { - crate::mm2::database::stats_nodes::insert_node_version_stat(ctx, node_version_stat).map_err(|e| e.to_string()) +fn insert_node_version_stat_to_db(ctx: &MmArc, node_version_stat: NodeVersionStat, db_id: Option<&str>) -> Result<(), String> { + crate::mm2::database::stats_nodes::insert_node_version_stat(ctx, node_version_stat, db_id).map_err(|e| e.to_string()) } #[cfg(target_arch = "wasm32")] fn delete_node_info_from_db(_ctx: &MmArc, _name: String) -> Result<(), String> { Ok(()) } #[cfg(not(target_arch = "wasm32"))] -fn delete_node_info_from_db(ctx: &MmArc, name: String) -> Result<(), String> { - crate::mm2::database::stats_nodes::delete_node_info(ctx, name).map_err(|e| e.to_string()) +fn delete_node_info_from_db(ctx: &MmArc, name: String, db_id: Option<&str>) -> Result<(), String> { + crate::mm2::database::stats_nodes::delete_node_info(ctx, name, db_id).map_err(|e| e.to_string()) } #[cfg(target_arch = "wasm32")] @@ -114,7 +114,8 @@ fn select_peers_addresses_from_db(_ctx: &MmArc) -> Result, #[cfg(not(target_arch = "wasm32"))] fn select_peers_addresses_from_db(ctx: &MmArc) -> Result, String> { - crate::mm2::database::stats_nodes::select_peers_addresses(ctx).map_err(|e| e.to_string()) + let _db_id: Option<&str> = None; // TODO + crate::mm2::database::stats_nodes::select_peers_addresses(ctx, None).map_err(|e| e.to_string()) } #[cfg(target_arch = "wasm32")] @@ -142,7 +143,8 @@ pub async fn add_node_to_version_stat(ctx: MmArc, req: Json) -> NodeVersionResul peer_id: node_info.peer_id, }; - insert_node_info_to_db(&ctx, &node_info_with_ipv4_addr).map_to_mm(NodeVersionError::DatabaseError)?; + let _db_id: Option<&str> = None; // TODO + insert_node_info_to_db(&ctx, 
&node_info_with_ipv4_addr, None).map_to_mm(NodeVersionError::DatabaseError)?; Ok("success".into()) } @@ -159,7 +161,8 @@ pub async fn remove_node_from_version_stat(_ctx: MmArc, _req: Json) -> NodeVersi pub async fn remove_node_from_version_stat(ctx: MmArc, req: Json) -> NodeVersionResult { let node_name: String = json::from_value(req["name"].clone())?; - delete_node_info_from_db(&ctx, node_name).map_to_mm(NodeVersionError::DatabaseError)?; + let _db_id: Option<&str> = None; // TODO + delete_node_info_from_db(&ctx, node_name, None).map_to_mm(NodeVersionError::DatabaseError)?; Ok("success".into()) } @@ -236,6 +239,7 @@ pub async fn start_version_stat_collection(ctx: MmArc, req: Json) -> NodeVersion let interval: f64 = json::from_value(req["interval"].clone())?; + let peers_addresses = select_peers_addresses_from_db(&ctx).map_to_mm(NodeVersionError::DatabaseError)?; let netid = ctx.conf["netid"].as_u64().unwrap_or(0) as u16; @@ -285,22 +289,23 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { StatsCollectionStatus::Updating(i) => { interval = i; *state = StatsCollectionStatus::Running; - }, + } StatsCollectionStatus::Stopping => { *state = StatsCollectionStatus::Stopped; break; - }, + } StatsCollectionStatus::Stopped => *state = StatsCollectionStatus::Running, } } - let peers_names = match select_peers_names(&ctx) { + let db_id: Option<&str> = None; // TODO + let peers_names = match select_peers_names(&ctx, db_id) { Ok(n) => n, Err(e) => { log::error!("Error selecting peers names from db: {}", e); Timer::sleep(10.).await; continue; - }, + } }; let peers: Vec = peers_names.keys().cloned().collect(); @@ -311,14 +316,14 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { P2PRequest::NetworkInfo(NetworkInfoRequest::GetMm2Version), peers, ) - .await + .await { Ok(res) => res, Err(e) => { log::error!("Error getting nodes versions from peers: {}", e); Timer::sleep(10.).await; continue; - }, + } }; for (peer_id, response) in get_versions_res { @@ -335,10 +340,11 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { timestamp, error: None, }; - if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat) { + let db_id: Option<&str> = None; // TODO + if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { log::error!("Error inserting node {} version {} into db: {}", name, v, e); }; - }, + } PeerDecodedResponse::Err(e) => { log::error!( "Node {} responded to version request with error: {}", @@ -351,10 +357,11 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { timestamp, error: Some(e.clone()), }; - if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat) { + let db_id: Option<&str> = None; // TODO + if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { log::error!("Error inserting node {} error into db: {}", name, e); }; - }, + } PeerDecodedResponse::None => { log::debug!("Node {} did not respond to version request", name.clone()); let node_version_stat = NodeVersionStat { @@ -363,10 +370,11 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { timestamp, error: None, }; - if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat) { + let db_id: Option<&str> = None; // TODO + if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { log::error!("Error inserting no response for node {} into db: {}", name, e); }; - }, + } } } } diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index fe59805baf..81b0e2ac14 100644 --- 
a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -95,26 +95,41 @@ use uuid::Uuid; #[cfg(feature = "custom-swap-locktime")] use std::sync::atomic::{AtomicU64, Ordering}; -#[path = "lp_swap/check_balance.rs"] mod check_balance; -#[path = "lp_swap/maker_swap.rs"] mod maker_swap; -#[path = "lp_swap/maker_swap_v2.rs"] pub mod maker_swap_v2; -#[path = "lp_swap/max_maker_vol_rpc.rs"] mod max_maker_vol_rpc; -#[path = "lp_swap/my_swaps_storage.rs"] mod my_swaps_storage; -#[path = "lp_swap/pubkey_banning.rs"] mod pubkey_banning; -#[path = "lp_swap/recreate_swap_data.rs"] mod recreate_swap_data; -#[path = "lp_swap/saved_swap.rs"] mod saved_swap; -#[path = "lp_swap/swap_lock.rs"] mod swap_lock; +#[path = "lp_swap/check_balance.rs"] +mod check_balance; +#[path = "lp_swap/maker_swap.rs"] +mod maker_swap; +#[path = "lp_swap/maker_swap_v2.rs"] +pub mod maker_swap_v2; +#[path = "lp_swap/max_maker_vol_rpc.rs"] +mod max_maker_vol_rpc; +#[path = "lp_swap/my_swaps_storage.rs"] +mod my_swaps_storage; +#[path = "lp_swap/pubkey_banning.rs"] +mod pubkey_banning; +#[path = "lp_swap/recreate_swap_data.rs"] +mod recreate_swap_data; +#[path = "lp_swap/saved_swap.rs"] +mod saved_swap; +#[path = "lp_swap/swap_lock.rs"] +mod swap_lock; #[path = "lp_swap/komodefi.swap_v2.pb.rs"] #[rustfmt::skip] mod swap_v2_pb; -#[path = "lp_swap/swap_v2_common.rs"] mod swap_v2_common; -#[path = "lp_swap/swap_v2_rpcs.rs"] pub(crate) mod swap_v2_rpcs; -#[path = "lp_swap/swap_watcher.rs"] pub(crate) mod swap_watcher; +#[path = "lp_swap/swap_v2_common.rs"] +mod swap_v2_common; +#[path = "lp_swap/swap_v2_rpcs.rs"] +pub(crate) mod swap_v2_rpcs; +#[path = "lp_swap/swap_watcher.rs"] +pub(crate) mod swap_watcher; #[path = "lp_swap/taker_restart.rs"] pub(crate) mod taker_restart; -#[path = "lp_swap/taker_swap.rs"] pub(crate) mod taker_swap; -#[path = "lp_swap/taker_swap_v2.rs"] pub mod taker_swap_v2; -#[path = "lp_swap/trade_preimage.rs"] mod trade_preimage; +#[path = "lp_swap/taker_swap.rs"] +pub(crate) mod taker_swap; +#[path = "lp_swap/taker_swap_v2.rs"] +pub mod taker_swap_v2; +#[path = "lp_swap/trade_preimage.rs"] +mod trade_preimage; #[cfg(target_arch = "wasm32")] #[path = "lp_swap/swap_wasm_db.rs"] @@ -241,7 +256,7 @@ pub fn p2p_keypair_and_peer_id_to_broadcast(ctx: &MmArc, p2p_privkey: Option<&Ke None => { let crypto_ctx = CryptoCtx::from_ctx(ctx).expect("CryptoCtx must be initialized already"); (*crypto_ctx.mm2_internal_key_pair(), None) - }, + } } } @@ -257,7 +272,7 @@ pub fn p2p_private_and_peer_id_to_broadcast(ctx: &MmArc, p2p_privkey: Option<&Ke None => { let crypto_ctx = CryptoCtx::from_ctx(ctx).expect("CryptoCtx must be initialized already"); (crypto_ctx.mm2_internal_privkey_secret().take(), None) - }, + } } } @@ -305,7 +320,7 @@ pub fn broadcast_swap_message(ctx: &MmArc, topic: String, msg: T, Err(e) => { error!("Error encoding and signing swap message: {}", e); return; - }, + } }; broadcast_p2p_msg(ctx, topic, encoded_msg, from); } @@ -322,7 +337,7 @@ pub fn broadcast_p2p_tx_msg(ctx: &MmArc, topic: String, msg: &TransactionEnum, p Err(e) => { error!("Error encoding and signing tx message: {}", e); return; - }, + } }; broadcast_p2p_msg(ctx, topic, encoded_msg, from); } @@ -342,14 +357,14 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest error!("Error saving the swap {} status: {}", status.data.uuid(), e); } Ok(()) - }, + } Err(swap_status_err) => { let error = format!( "Couldn't deserialize swap msg to either 'SwapMsg': {} or to 'SwapStatus': {}", swap_msg_err, 
swap_status_err ); MmError::err(P2PRequestError::DecodeError(error)) - }, + } }; #[cfg(target_arch = "wasm32")] @@ -357,7 +372,7 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest "Couldn't deserialize 'SwapMsg': {}", swap_msg_err ))); - }, + } }; debug!("Processing swap msg {:?} for uuid {}", msg, uuid); @@ -1030,8 +1045,9 @@ pub async fn insert_new_swap_to_db( } #[cfg(not(target_arch = "wasm32"))] -fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap) { - if let Some(conn) = ctx.sqlite_conn_opt() { +fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap, db_id: Option<&str>) { + if let Some(conn) = ctx.sqlite_conn_opt_v2(db_id) { + let conn = conn.lock().unwrap(); crate::mm2::database::stats_swaps::add_swap_to_index(&conn, swap) } } @@ -1039,7 +1055,7 @@ fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap) { #[cfg(not(target_arch = "wasm32"))] async fn save_stats_swap(ctx: &MmArc, swap: &SavedSwap, db_id: Option<&str>) -> Result<(), String> { try_s!(swap.save_to_stats_db(ctx, db_id).await); - add_swap_to_db_index(ctx, swap); + add_swap_to_db_index(ctx, swap, db_id); Ok(()) } @@ -1134,19 +1150,19 @@ pub async fn my_swap_status(ctx: MmArc, req: Json) -> Result>, let res_js = json!({ "result": MySwapStatusResponse::from(status) }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) - }, + } Some(MAKER_SWAP_V2_TYPE) => { let swap_data = try_s!(get_maker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); let res_js = json!({ "result": swap_data }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) - }, + } Some(TAKER_SWAP_V2_TYPE) => { let swap_data = try_s!(get_taker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); let res_js = json!({ "result": swap_data }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) - }, + } Some(unsupported_type) => ERR!("Got unsupported swap type from DB: {}", unsupported_type), None => ERR!("No swap with uuid {}", uuid), } @@ -1329,7 +1345,7 @@ pub async fn latest_swaps_for_pair( Ok(None) => { error!("No such swap with the uuid '{}'", uuid); continue; - }, + } Err(e) => return Err(MmError::new(LatestSwapsErr::UnableToLoadSavedSwaps(e.into_inner()))), }; swaps.push(swap); @@ -1360,7 +1376,7 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result { let swap_json = try_s!(json::to_value(MySwapStatusResponse::from(swap))); swaps.push(swap_json) - }, + } Ok(None) => warn!("No such swap with the uuid '{}'", uuid), Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, @@ -1368,14 +1384,14 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); - }, + } Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, TAKER_SWAP_V2_TYPE => match get_taker_swap_data_for_rpc(&ctx, uuid, Some(&db_result.pubkey)).await { Ok(data) => { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); - }, + } Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, unknown_type => error!("Swap with the uuid '{}' has unknown type {}", uuid, unknown_type), @@ -1419,11 +1435,11 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Ok(None) => { warn!("Swap {} is indexed, but doesn't exist in DB", uuid); continue; - }, + } Err(e) => { error!("Error {} on getting swap {} data from DB", e, uuid); continue; - }, + } }; info!("Kick starting the swap {}", swap.uuid()); let 
maker_coin_ticker = match swap.maker_coin_ticker() { @@ -1431,14 +1447,14 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Err(e) => { error!("Error {} getting maker coin of swap: {}", e, swap.uuid()); continue; - }, + } }; let taker_coin_ticker = match swap.taker_coin_ticker() { Ok(t) => t, Err(e) => { error!("Error {} getting taker coin of swap {}", e, swap.uuid()); continue; - }, + } }; coins.insert(maker_coin_ticker.clone()); coins.insert(taker_coin_ticker.clone()); @@ -1456,7 +1472,7 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Err(e) => { error!("Error {} getting DB repr of maker swap {}", e, maker_uuid); continue; - }, + } }; debug!("Got maker swap repr {:?}", maker_swap_repr); @@ -1481,7 +1497,7 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Err(e) => { error!("Error {} getting DB repr of taker swap {}", e, taker_uuid); continue; - }, + } }; debug!("Got taker swap repr {:?}", taker_swap_repr); @@ -1512,11 +1528,11 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker taker_coin_ticker ); Timer::sleep(5.).await; - }, + } Err(e) => { error!("Error {} on {} find attempt", e, taker_coin_ticker); return; - }, + } }; }; @@ -1530,11 +1546,11 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker maker_coin_ticker ); Timer::sleep(5.).await; - }, + } Err(e) => { error!("Error {} on {} find attempt", e, maker_coin_ticker); return; - }, + } }; }; match swap { @@ -1547,8 +1563,8 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker }, ctx, ) - .await; - }, + .await; + } SavedSwap::Taker(saved_swap) => { run_taker_swap( RunTakerSwapInput::KickStart { @@ -1558,8 +1574,8 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker }, ctx, ) - .await; - }, + .await; + } } } @@ -1608,16 +1624,16 @@ pub async fn import_swaps(ctx: MmArc, req: Json) -> Result>, St LEGACY_SWAP_TYPE, accound_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } } imported.push(swap.uuid().to_owned()); - }, + } Err(e) => { skipped.insert(swap.uuid().to_owned(), ERRL!("{}", e)); - }, + } } } let res = try_s!(json::to_vec(&json!({ @@ -1657,14 +1673,14 @@ pub async fn active_swaps_rpc(ctx: MmArc, req: Json) -> Result> Err(e) => { error!("Error on loading_from_db: {}", e); continue; - }, + } }; map.insert(*uuid, status); - }, + } unsupported_type => { error!("active_swaps_rpc doesn't support swap type {}", unsupported_type); continue; - }, + } } } Some(map) @@ -1728,7 +1744,7 @@ pub fn detect_secret_hash_algo(maker_coin: &MmCoinEnum, taker_coin: &MmCoinEnum) match (maker_coin, taker_coin) { (MmCoinEnum::Tendermint(_) | MmCoinEnum::TendermintToken(_) | MmCoinEnum::LightningCoin(_), _) => { SecretHashAlgo::SHA256 - }, + } // If taker is lightning coin the SHA256 of the secret will be sent as part of the maker signed invoice (_, MmCoinEnum::Tendermint(_) | MmCoinEnum::TendermintToken(_)) => SecretHashAlgo::SHA256, (_, _) => SecretHashAlgo::DHASH160, @@ -1839,19 +1855,19 @@ pub fn process_swap_v2_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PProcessRes match swap_message.inner { Some(swap_message::Inner::MakerNegotiation(maker_negotiation)) => { msg_store.maker_negotiation = Some(maker_negotiation) - }, + } Some(swap_message::Inner::TakerNegotiation(taker_negotiation)) => { msg_store.taker_negotiation = Some(taker_negotiation) - }, + } Some(swap_message::Inner::MakerNegotiated(maker_negotiated)) => { msg_store.maker_negotiated = 
Some(maker_negotiated) - }, + } Some(swap_message::Inner::TakerFundingInfo(taker_funding)) => msg_store.taker_funding = Some(taker_funding), Some(swap_message::Inner::MakerPaymentInfo(maker_payment)) => msg_store.maker_payment = Some(maker_payment), Some(swap_message::Inner::TakerPaymentInfo(taker_payment)) => msg_store.taker_payment = Some(taker_payment), Some(swap_message::Inner::TakerPaymentSpendPreimage(preimage)) => { msg_store.taker_payment_spend_preimage = Some(preimage) - }, + } None => return MmError::err(P2PProcessError::DecodeError("swap_message.inner is None".into())), } } @@ -1932,7 +1948,7 @@ mod lp_swap_tests { assert_eq!( DexFee::WithBurn { fee_amount: "0.00001".into(), - burn_amount: "0.00000001".into() + burn_amount: "0.00000001".into(), }, actual_fee ); @@ -2339,7 +2355,7 @@ mod lp_swap_tests { &rick_activation_params, maker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); println!("Maker address {}", rick_maker.my_address().unwrap()); @@ -2350,7 +2366,7 @@ mod lp_swap_tests { &morty_activation_params, maker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); let taker_ctx_conf = json!({ "netid": 1234, @@ -2374,7 +2390,7 @@ mod lp_swap_tests { &rick_activation_params, taker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); let morty_taker = block_on(utxo_standard_coin_with_priv_key( &taker_ctx, @@ -2383,7 +2399,7 @@ mod lp_swap_tests { &morty_activation_params, taker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); println!("Taker address {}", rick_taker.my_address().unwrap()); @@ -2416,7 +2432,7 @@ mod lp_swap_tests { maker_swap.fail_at = maker_fail_at; #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(taker_swap::FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(taker_swap::FailAt::from).ok(); let taker_swap = TakerSwap::new( taker_ctx.clone(), @@ -2432,7 +2448,7 @@ mod lp_swap_tests { lock_duration, None, #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); block_on(futures::future::join( @@ -2478,7 +2494,7 @@ mod lp_swap_tests { DexFee::Standard(t) => t, DexFee::WithBurn { .. } => { panic!("Wrong variant returned for MYCOIN from `dex_fee_amount_from_taker_coin`.") - }, + } }; let expected_mycoin_taker_fee = &kmd_taker_fee / &MmNumber::from("0.75"); @@ -2497,7 +2513,7 @@ mod lp_swap_tests { DexFee::Standard(t) => t, DexFee::WithBurn { .. } => { panic!("Wrong variant returned for MYCOIN from `dex_fee_amount_from_taker_coin`.") - }, + } }; let testcoin = coins::TestCoin::default(); @@ -2505,7 +2521,7 @@ mod lp_swap_tests { DexFee::Standard(t) => t, DexFee::WithBurn { .. } => { panic!("Wrong variant returned for TEST coin from `dex_fee_amount_from_taker_coin`.") - }, + } }; assert_eq!(testcoin_taker_fee * MmNumber::from("0.90"), mycoin_taker_fee); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index d800883dd5..18c46849d6 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -43,7 +43,8 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] use prost::Message; +#[allow(unused_imports)] +use prost::Message; /// Negotiation data representation to be stored in DB. 
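
// NOTE (sketch, not part of the patch): the dex-fee assertions in the test hunk
// above encode two ratios — the KMD taker fee is the regular taker fee with a
// 25% discount (so regular_fee == kmd_fee / 0.75), and for the TestCoin fixture
// mycoin_fee == testcoin_fee * 0.90. A tiny self-contained check of the first
// ratio with illustrative amounts (the numbers are made up, not taken from the
// test fixtures):
use mm2_number::MmNumber;

fn kmd_discount_example() {
    let kmd_fee = MmNumber::from("0.0003");
    let regular_fee = MmNumber::from("0.0004");
    // 0.0003 / 0.75 == 0.0004 exactly; MmNumber compares by its rational value.
    assert_eq!(regular_fee, &kmd_fee / &MmNumber::from("0.75"));
}
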
#[derive(Clone, Debug, Deserialize, Serialize)] @@ -180,7 +181,7 @@ impl StateMachineStorage for MakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -215,15 +216,18 @@ impl StateMachineStorage for MakerSwapStorage { async fn get_repr(&self, id: Self::MachineId) -> Result { let ctx = self.ctx.clone(); let id_str = id.to_string(); + let db_id = self.db_id.clone(); async_blocking(move || { - Ok(ctx.sqlite_connection().query_row( + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); + Ok(conn.query_row( SELECT_MY_SWAP_V2_BY_UUID, &[(":uuid", &id_str)], MakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -395,7 +399,7 @@ pub struct MakerSwapStateMachine - MakerSwapStateMachine +MakerSwapStateMachine { /// Timeout for taker payment's on-chain confirmation. #[inline] @@ -420,7 +424,7 @@ impl StorableStateMachine - for MakerSwapStateMachine +for MakerSwapStateMachine { type Storage = MakerSwapStorage; type Result = (); @@ -460,12 +464,12 @@ impl Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { MakerSwapEvent::Initialized { maker_coin_start_block, @@ -617,8 +621,8 @@ impl return MmError::err(SwapRecreateError::SwapAborted), MakerSwapEvent::Completed => return MmError::err(SwapRecreateError::SwapCompleted), MakerSwapEvent::MakerPaymentRefunded { .. } => { - return MmError::err(SwapRecreateError::SwapFinishedWithRefund) - }, + return MmError::err(SwapRecreateError::SwapFinishedWithRefund); + } }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -707,7 +711,7 @@ impl { let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) .expect("from_ctx should not fail at this point"); @@ -715,7 +719,7 @@ impl Default for Initialize { } impl InitialState - for Initialize +for Initialize { type StateMachine = MakerSwapStateMachine; } #[async_trait] impl State - for Initialize +for Initialize { type StateMachine = MakerSwapStateMachine; @@ -807,7 +811,7 @@ impl { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -815,7 +819,7 @@ impl { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let preimage_value = TradePreimageValue::Exact(state_machine.maker_volume.to_decimal()); @@ -829,7 +833,7 @@ impl { let reason = AbortReason::FailedToGetMakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_payment_spend_trade_fee = match state_machine.taker_coin.get_receiver_trade_fee(stage).compat().await @@ -838,7 +842,7 @@ impl { let reason = AbortReason::FailedToGetTakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let prepared_params = MakerSwapPreparedParams { @@ -855,7 +859,7 @@ impl { impl TransitionFrom> for Initialized {} impl StorableState - for Initialized +for Initialized { type StateMachine = 
MakerSwapStateMachine; @@ -902,7 +906,7 @@ impl State - for Initialized +for Initialized { type StateMachine = MakerSwapStateMachine; @@ -943,7 +947,7 @@ impl { let reason = AbortReason::DidNotReceiveTakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; drop(abort_handle); @@ -953,11 +957,11 @@ impl { let reason = AbortReason::TakerAbortedNegotiation(abort.reason); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } None => { let reason = AbortReason::ReceivedInvalidTakerNegotiation; return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let started_at_diff = state_machine.started_at.abs_diff(taker_data.started_at); @@ -984,7 +988,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let maker_coin_htlc_pub_from_taker = @@ -993,7 +997,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = WaitingForTakerFunding { @@ -1066,13 +1070,12 @@ struct WaitingForTakerFunding TransitionFrom> - for WaitingForTakerFunding -{ -} +for WaitingForTakerFunding +{} #[async_trait] impl State - for WaitingForTakerFunding +for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1105,7 +1108,7 @@ impl { let reason = AbortReason::DidNotReceiveTakerFundingInfo(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; drop(abort_handle); @@ -1115,7 +1118,7 @@ impl { let reason = AbortReason::FailedToParseTakerFunding(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = TakerFundingReceived { maker_coin_start_block: self.maker_coin_start_block, @@ -1129,7 +1132,7 @@ impl StorableState - for WaitingForTakerFunding +for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1152,13 +1155,12 @@ struct TakerFundingReceived - TransitionFrom> for TakerFundingReceived -{ -} +TransitionFrom> for TakerFundingReceived +{} #[async_trait] impl State - for TakerFundingReceived +for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1199,7 +1201,7 @@ impl { let reason = AbortReason::FailedToGenerateFundingSpend(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let args = SendMakerPaymentArgs { @@ -1215,7 +1217,7 @@ impl { let reason = AbortReason::FailedToSendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; info!( "Sent maker payment {} tx {:02x} during swap {}", @@ -1237,7 +1239,7 @@ impl StorableState - for TakerFundingReceived +for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1265,14 +1267,13 @@ struct MakerPaymentSentFundingSpendGenerated - TransitionFrom> - for MakerPaymentSentFundingSpendGenerated -{ -} +TransitionFrom> +for MakerPaymentSentFundingSpendGenerated +{} #[async_trait] impl State - for MakerPaymentSentFundingSpendGenerated +for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1329,7 +1330,7 @@ impl { @@ -1342,7 +1343,7 @@ impl { let next_state = MakerPaymentRefundRequired { maker_coin_start_block: self.maker_coin_start_block, @@ -1353,15 +1354,15 @@ impl { Timer::sleep(30.).await; - }, + } Err(e) => match e { SearchForFundingSpendErr::Rpc(e) => { error!("Rpc error {} on 
search_for_taker_funding_spend", e); Timer::sleep(30.).await; - }, + } // Other error cases are considered irrecoverable, so we should proceed to refund stage // handling using @ binding to trigger a compiler error when new variant is added e @ SearchForFundingSpendErr::InvalidInputTx(_) @@ -1376,7 +1377,7 @@ impl StorableState - for MakerPaymentSentFundingSpendGenerated +for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1434,18 +1435,17 @@ struct MakerPaymentRefundRequired - TransitionFrom> - for MakerPaymentRefundRequired -{ -} +TransitionFrom> +for MakerPaymentRefundRequired +{} + impl - TransitionFrom> for MakerPaymentRefundRequired -{ -} +TransitionFrom> for MakerPaymentRefundRequired +{} #[async_trait] impl State - for MakerPaymentRefundRequired +for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1471,7 +1471,7 @@ impl { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = MakerPaymentRefunded { @@ -1496,7 +1496,7 @@ impl { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - }, + } } } @@ -1526,7 +1526,7 @@ impl { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = MakerPaymentRefunded { @@ -1541,7 +1541,7 @@ impl StorableState - for MakerPaymentRefundRequired +for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1568,14 +1568,13 @@ struct TakerPaymentReceived - TransitionFrom> - for TakerPaymentReceived -{ -} +TransitionFrom> +for TakerPaymentReceived +{} #[async_trait] impl State - for TakerPaymentReceived +for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1616,7 +1615,7 @@ impl s, @@ -1659,7 +1658,7 @@ impl StorableState - for TakerPaymentReceived +for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1748,13 +1747,12 @@ struct TakerPaymentSpent - TransitionFrom> for TakerPaymentSpent -{ -} +TransitionFrom> for TakerPaymentSpent +{} #[async_trait] impl State - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1764,7 +1762,7 @@ impl StorableState - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1829,7 +1827,7 @@ impl Aborted { #[async_trait] impl LastState - for Aborted +for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1842,7 +1840,7 @@ impl StorableState - for Aborted +for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1854,19 +1852,20 @@ impl TransitionFrom> for Aborted {} + impl TransitionFrom> for Aborted {} + impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} + impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} + impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} struct Completed { maker_coin: PhantomData, @@ -1883,7 +1882,7 @@ impl Completed { } impl StorableState - for Completed +for Completed { type StateMachine = MakerSwapStateMachine; @@ -1892,7 +1891,7 @@ impl LastState - for Completed +for Completed { type StateMachine = MakerSwapStateMachine; @@ -1905,9 +1904,8 @@ impl - TransitionFrom> for Completed -{ -} +TransitionFrom> for Completed +{} struct MakerPaymentRefunded { taker_coin: PhantomData, @@ -1917,7 +1915,7 @@ struct MakerPaymentRefunded { } impl StorableState 
- for MakerPaymentRefunded +for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1938,7 +1936,7 @@ impl LastState - for MakerPaymentRefunded +for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1954,6 +1952,5 @@ impl - TransitionFrom> for MakerPaymentRefunded -{ -} +TransitionFrom> for MakerPaymentRefunded +{} diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 174bc870ce..f55847a581 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -104,8 +104,10 @@ mod native_impl { paging_options: Option<&PagingOptions>, db_id: &str, ) -> MySwapsResult { + let conn = self.ctx.sqlite_connection_v2(Some(db_id)); + let conn = conn.lock().unwrap(); Ok(select_uuids_by_my_swaps_filter( - &self.ctx.sqlite_connection(), + &conn, filter, paging_options, db_id, @@ -153,17 +155,17 @@ mod wasm_impl { let stringified_error = e.to_string(); match e { // We don't expect that the `String` and `u32` types serialization to fail. - CursorError::ErrorSerializingIndexFieldValue {..} + CursorError::ErrorSerializingIndexFieldValue { .. } // We don't expect that the `String` and `u32` types deserialization to fail. - | CursorError::ErrorDeserializingIndexValue {..} - | CursorError::ErrorOpeningCursor {..} - | CursorError::AdvanceError {..} - | CursorError::InvalidKeyRange {..} - | CursorError::TypeMismatch {..} - | CursorError::IncorrectNumberOfKeysPerIndex {..} + | CursorError::ErrorDeserializingIndexValue { .. } + | CursorError::ErrorOpeningCursor { .. } + | CursorError::AdvanceError { .. } + | CursorError::InvalidKeyRange { .. } + | CursorError::TypeMismatch { .. } + | CursorError::IncorrectNumberOfKeysPerIndex { .. } | CursorError::UnexpectedState(..) - | CursorError::IncorrectUsage {..} => MySwapsError::InternalError(stringified_error), - CursorError::ErrorDeserializingItem {..} => MySwapsError::ErrorDeserializingItem(stringified_error), + | CursorError::IncorrectUsage { .. } => MySwapsError::InternalError(stringified_error), + CursorError::ErrorDeserializingItem { .. } => MySwapsError::ErrorDeserializingItem(stringified_error), } } } @@ -228,7 +230,7 @@ mod wasm_impl { .await? .collect() .await? - }, + } (Some(my_coin), None) => { my_swaps_table .cursor_builder() @@ -238,7 +240,7 @@ mod wasm_impl { .await? .collect() .await? - }, + } (None, Some(other_coin)) => { my_swaps_table .cursor_builder() @@ -248,7 +250,7 @@ mod wasm_impl { .await? .collect() .await? - }, + } (None, None) => { my_swaps_table .cursor_builder() @@ -257,7 +259,7 @@ mod wasm_impl { .await? .collect() .await? - }, + } }; let uuids: BTreeSet = items @@ -277,7 +279,7 @@ mod wasm_impl { skipped: 0, pubkey: db_id.to_string(), }) - }, + } } } } @@ -297,7 +299,7 @@ mod wasm_impl { .position(|ordered_uuid| ordered_uuid.uuid == expected_uuid) .or_mm_err(|| MySwapsError::FromUuidNotFound(expected_uuid))? 
+ 1 - }, + } None => (paging.page_number.get() - 1) * paging.limit, }; @@ -438,13 +440,13 @@ mod wasm_tests { (7, "c52659d7-4e13-41f5-9c1a-30cc2f646033", MAKER_SWAP_V2_TYPE), (8, "af5e0383-97f6-4408-8c03-a8eb8d17e46d", LEGACY_SWAP_TYPE), ] - .iter() - .map(|(started_at, uuid, swap_type)| OrderedUuid { - started_at: *started_at, - uuid: Uuid::parse_str(uuid).unwrap(), - swap_type: *swap_type, - }) - .collect(); + .iter() + .map(|(started_at, uuid, swap_type)| OrderedUuid { + started_at: *started_at, + uuid: Uuid::parse_str(uuid).unwrap(), + swap_type: *swap_type, + }) + .collect(); let paging = PagingOptions { limit: 2, diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 8d7abd1f44..074927a889 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -106,7 +106,11 @@ pub(super) async fn has_db_record_for( let id_str = id.to_string(); let db_id = db_id.map(|e| e.to_string()); - Ok(async_blocking(move || does_swap_exist(&ctx.sqlite_connection(), &id_str, db_id.as_deref())).await?) + Ok(async_blocking(move || { + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); + does_swap_exist(&conn, &id_str, db_id.as_deref()) + }).await?) } #[cfg(target_arch = "wasm32")] @@ -130,22 +134,26 @@ pub(super) async fn store_swap_event( event: T::Event, db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> -where - T::Event: DeserializeOwned + Serialize + Send + 'static, + where + T::Event: DeserializeOwned + Serialize + Send + 'static, { let id_str = id.to_string(); let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let events_json = get_swap_events(&ctx.sqlite_connection(), &id_str, db_id.as_deref())?; + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); + let events_json = get_swap_events(&conn, &id_str, db_id.as_deref())?; let mut events: Vec = serde_json::from_str(&events_json)?; events.push(event); drop_mutability!(events); let serialized_events = serde_json::to_string(&events)?; - update_swap_events(&ctx.sqlite_connection(), &id_str, &serialized_events, db_id.as_deref())?; + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); + update_swap_events(&conn, &id_str, &serialized_events, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -204,10 +212,12 @@ pub(super) async fn get_unfinished_swaps_uuids( ) -> MmResult, SwapStateMachineError> { let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - select_unfinished_swaps_uuids(&ctx.sqlite_connection(), swap_type, db_id.as_deref()) + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); + select_unfinished_swaps_uuids(&conn, swap_type, db_id.as_deref()) .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -236,13 +246,15 @@ pub(super) async fn mark_swap_as_finished( ) -> MmResult<(), SwapStateMachineError> { let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); Ok(set_swap_is_finished( - &ctx.sqlite_connection(), + &conn, &id.to_string(), db_id.as_deref(), )?) 
}) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -306,7 +318,7 @@ pub(super) async fn acquire_reentrancy_lock_impl(ctx: &MmArc, uuid: Uuid) -> MmR attempts += 1; Timer::sleep(40.).await; } - }, + } } } } @@ -333,7 +345,7 @@ pub(super) trait GetSwapCoins { /// Generic function for upgraded swaps kickstart handling. /// It is implemented only for UtxoStandardCoin/UtxoStandardCoin case temporary. pub(super) async fn swap_kickstart_handler< - T: StorableStateMachine>, + T: StorableStateMachine>, >( ctx: MmArc, swap_repr: ::DbRepr, @@ -356,11 +368,11 @@ pub(super) async fn swap_kickstart_handler< uuid, taker_coin_ticker, ); Timer::sleep(1.).await; - }, + } Err(e) => { error!("Error {} on {} find attempt", e, taker_coin_ticker); return; - }, + } }; }; @@ -375,11 +387,11 @@ pub(super) async fn swap_kickstart_handler< uuid, maker_coin_ticker, ); Timer::sleep(1.).await; - }, + } Err(e) => { error!("Error {} on {} find attempt", e, maker_coin_ticker); return; - }, + } }; }; @@ -391,7 +403,7 @@ pub(super) async fn swap_kickstart_handler< maker_coin_ticker, taker_coin_ticker ); return; - }, + } }; let recreate_context = SwapRecreateCtx { maker_coin, taker_coin }; @@ -401,7 +413,7 @@ pub(super) async fn swap_kickstart_handler< Err(e) => { error!("Error {} on trying to recreate the swap {}", e, uuid); return; - }, + } }; if let Err(e) = state_machine.kickstart(state).await { diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index dcbd72cc8e..0661de8de4 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -35,21 +35,24 @@ cfg_wasm32!( ); #[cfg(not(target_arch = "wasm32"))] -pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid, _db_id: Option<&str>) -> MmResult, SqlError> { +pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid, db_id: Option<&str>) -> MmResult, SqlError> { let ctx = ctx.clone(); let uuid = uuid.to_string(); + let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { const SELECT_SWAP_TYPE_BY_UUID: &str = "SELECT swap_type FROM my_swaps WHERE uuid = :uuid;"; + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); let maybe_swap_type = query_single_row( - &ctx.sqlite_connection(), + &conn, SELECT_SWAP_TYPE_BY_UUID, &[(":uuid", uuid.as_str())], |row| row.get(0), )?; Ok(maybe_swap_type) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -172,21 +175,24 @@ pub(super) async fn get_taker_swap_data_for_rpc( async fn get_swap_data_for_rpc_impl( ctx: &MmArc, uuid: &Uuid, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> MmResult>, SqlError> { let ctx = ctx.clone(); let uuid = uuid.to_string(); + let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); let swap_data = query_single_row( - &ctx.sqlite_connection(), + &conn, SELECT_MY_SWAP_V2_FOR_RPC_BY_UUID, &[(":uuid", uuid.as_str())], MySwapForRpc::from_row, )?; Ok(swap_data) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -313,15 +319,15 @@ async fn get_swap_data_by_uuid_and_type( SavedSwap::Maker(m) => SwapRpcData::MakerV1(m), SavedSwap::Taker(t) => SwapRpcData::TakerV1(t), })) - }, + } MAKER_SWAP_V2_TYPE => { let data = get_maker_swap_data_for_rpc(ctx, &uuid, db_id).await?; Ok(data.map(SwapRpcData::MakerV2)) - }, + } TAKER_SWAP_V2_TYPE => { let data = get_taker_swap_data_for_rpc(ctx, &uuid, db_id).await?; Ok(data.map(SwapRpcData::TakerV2)) - }, + } 
unsupported => MmError::err(GetSwapDataErr::UnsupportedSwapType(unsupported)), } } @@ -365,7 +371,7 @@ impl HttpStatusCode for MySwapStatusError { MySwapStatusError::NoSwapWithUuid(_) => StatusCode::BAD_REQUEST, MySwapStatusError::DbError(_) | MySwapStatusError::UnsupportedSwapType(_) => { StatusCode::INTERNAL_SERVER_ERROR - }, + } } } } @@ -516,7 +522,7 @@ pub(crate) async fn active_swaps_rpc( match get_swap_data_by_uuid_and_type(&ctx, None, *uuid, *swap_type).await { Ok(Some(data)) => { statuses.insert(*uuid, data); - }, + } Ok(None) => warn!("Swap {} data doesn't exist in DB", uuid), Err(e) => error!("Error {} while trying to get swap {} data", e, uuid), } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 44a20af720..3abe896415 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -44,7 +44,8 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] use prost::Message; +#[allow(unused_imports)] +use prost::Message; /// Negotiation data representation to be stored in DB. #[derive(Clone, Debug, Deserialize, Serialize)] @@ -212,7 +213,7 @@ impl StateMachineStorage for TakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -247,15 +248,18 @@ impl StateMachineStorage for TakerSwapStorage { async fn get_repr(&self, id: Self::MachineId) -> Result { let ctx = self.ctx.clone(); let id_str = id.to_string(); + let db_id = self.db_id.clone(); async_blocking(move || { - Ok(ctx.sqlite_connection().query_row( + let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = conn.lock().unwrap(); + Ok(conn.query_row( SELECT_MY_SWAP_V2_BY_UUID, &[(":uuid", &id_str)], TakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -429,7 +433,7 @@ pub struct TakerSwapStateMachine - TakerSwapStateMachine +TakerSwapStateMachine { fn maker_payment_conf_timeout(&self) -> u64 { self.started_at + self.lock_duration / 3 } @@ -450,7 +454,7 @@ impl StorableStateMachine - for TakerSwapStateMachine +for TakerSwapStateMachine { type Storage = TakerSwapStorage; type Result = (); @@ -490,12 +494,12 @@ impl Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { TakerSwapEvent::Initialized { maker_coin_start_block, @@ -730,11 +734,11 @@ impl return MmError::err(SwapRecreateError::SwapAborted), TakerSwapEvent::Completed => return MmError::err(SwapRecreateError::SwapCompleted), TakerSwapEvent::TakerFundingRefunded { .. } => { - return MmError::err(SwapRecreateError::SwapFinishedWithRefund) - }, + return MmError::err(SwapRecreateError::SwapFinishedWithRefund); + } TakerSwapEvent::TakerPaymentRefunded { .. 
} => { - return MmError::err(SwapRecreateError::SwapFinishedWithRefund) - }, + return MmError::err(SwapRecreateError::SwapFinishedWithRefund); + } }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -823,7 +827,7 @@ impl { let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) .expect("from_ctx should not fail at this point"); @@ -831,7 +835,7 @@ impl Default for Initialize { } impl InitialState - for Initialize +for Initialize { type StateMachine = TakerSwapStateMachine; } #[async_trait] impl State - for Initialize +for Initialize { type StateMachine = TakerSwapStateMachine; @@ -922,7 +926,7 @@ impl { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -930,7 +934,7 @@ impl { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let total_payment_value = @@ -947,7 +951,7 @@ impl { let reason = AbortReason::FailedToGetTakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let maker_payment_spend_fee = match state_machine.maker_coin.get_receiver_trade_fee(stage).compat().await { @@ -955,7 +959,7 @@ impl { let reason = AbortReason::FailedToGetMakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let prepared_params = TakerSwapPreparedParams { @@ -978,7 +982,7 @@ impl { impl TransitionFrom> for Initialized {} impl StorableState - for Initialized +for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1025,7 +1029,7 @@ impl State - for Initialized +for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1042,7 +1046,7 @@ impl { let reason = AbortReason::DidNotReceiveMakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; debug!("Received maker negotiation message {:?}", maker_negotiation); @@ -1072,7 +1076,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_htlc_pub_from_maker = match state_machine @@ -1083,7 +1087,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_maker_address = match state_machine @@ -1094,7 +1098,7 @@ impl { let reason = AbortReason::FailedToParseAddress(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let unique_data = state_machine.unique_data(); @@ -1135,7 +1139,7 @@ impl { let reason = AbortReason::DidNotReceiveMakerNegotiated(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; drop(abort_handle); @@ -1219,13 +1223,12 @@ struct Negotiated TransitionFrom> - for Negotiated -{ -} +for Negotiated +{} #[async_trait] impl State - for Negotiated +for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1245,7 +1248,7 @@ impl { let reason = AbortReason::FailedToSendTakerFunding(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; info!( @@ -1266,7 +1269,7 @@ impl StorableState - for Negotiated +for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1290,7 +1293,7 @@ struct TakerFundingSent State - 
for TakerFundingSent +for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1330,7 +1333,7 @@ impl TransitionFrom> - for TakerFundingSent -{ -} +for TakerFundingSent +{} impl StorableState - for TakerFundingSent +for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1432,13 +1434,12 @@ struct MakerPaymentAndFundingSpendPreimgReceived - TransitionFrom> - for MakerPaymentAndFundingSpendPreimgReceived -{ -} +TransitionFrom> +for MakerPaymentAndFundingSpendPreimgReceived +{} impl StorableState - for MakerPaymentAndFundingSpendPreimgReceived +for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1465,7 +1466,7 @@ impl State - for MakerPaymentAndFundingSpendPreimgReceived +for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1575,7 +1576,7 @@ impl - TransitionFrom> for TakerPaymentSent -{ -} +TransitionFrom> for TakerPaymentSent +{} + impl - TransitionFrom> - for TakerPaymentSent -{ -} +TransitionFrom> +for TakerPaymentSent +{} #[async_trait] impl State - for TakerPaymentSent +for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1669,7 +1669,7 @@ impl StorableState - for TakerPaymentSent +for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1771,22 +1771,21 @@ struct TakerFundingRefundRequired - TransitionFrom> for TakerFundingRefundRequired -{ -} +TransitionFrom> for TakerFundingRefundRequired +{} + impl - TransitionFrom> - for TakerFundingRefundRequired -{ -} +TransitionFrom> +for TakerFundingRefundRequired +{} + impl - TransitionFrom> for TakerFundingRefundRequired -{ -} +TransitionFrom> for TakerFundingRefundRequired +{} #[async_trait] impl State - for TakerFundingRefundRequired +for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1815,7 +1814,7 @@ impl { let reason = AbortReason::TakerFundingRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = TakerFundingRefunded { @@ -1829,7 +1828,7 @@ impl StorableState - for TakerFundingRefundRequired +for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1861,17 +1860,16 @@ struct TakerPaymentRefundRequired - TransitionFrom> for TakerPaymentRefundRequired -{ -} +TransitionFrom> for TakerPaymentRefundRequired +{} + impl - TransitionFrom> for TakerPaymentRefundRequired -{ -} +TransitionFrom> for TakerPaymentRefundRequired +{} #[async_trait] impl State - for TakerPaymentRefundRequired +for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1893,7 +1891,7 @@ impl { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - }, + } } } @@ -1918,7 +1916,7 @@ impl { let reason = AbortReason::TakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = TakerPaymentRefunded { @@ -1935,7 +1933,7 @@ impl StorableState - for TakerPaymentRefundRequired +for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1961,14 +1959,13 @@ struct MakerPaymentConfirmed - TransitionFrom> - for MakerPaymentConfirmed -{ -} +TransitionFrom> +for MakerPaymentConfirmed +{} #[async_trait] impl State - for MakerPaymentConfirmed +for MakerPaymentConfirmed { type StateMachine = TakerSwapStateMachine; @@ -2000,7 +1997,7 @@ impl StorableState - for MakerPaymentConfirmed +for MakerPaymentConfirmed { type StateMachine = 
TakerSwapStateMachine; @@ -2057,13 +2054,12 @@ struct TakerPaymentSpent - TransitionFrom> for TakerPaymentSpent -{ -} +TransitionFrom> for TakerPaymentSpent +{} #[async_trait] impl State - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2083,7 +2079,7 @@ impl { let reason = AbortReason::CouldNotExtractSecret(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let args = SpendMakerPaymentArgs { @@ -2100,7 +2096,7 @@ impl { let reason = AbortReason::FailedToSpendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; info!( "Spent maker payment {} tx {:02x} during swap {}", @@ -2121,7 +2117,7 @@ impl StorableState - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2156,12 +2152,11 @@ struct MakerPaymentSpent - TransitionFrom> for MakerPaymentSpent -{ -} +TransitionFrom> for MakerPaymentSpent +{} impl StorableState - for MakerPaymentSpent +for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2191,7 +2186,7 @@ impl State - for MakerPaymentSpent +for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2241,7 +2236,7 @@ impl Aborted { #[async_trait] impl LastState - for Aborted +for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2254,7 +2249,7 @@ impl StorableState - for Aborted +for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2266,23 +2261,24 @@ impl TransitionFrom> for Aborted {} + impl TransitionFrom> for Aborted {} + impl TransitionFrom> - for Aborted -{ -} +for Aborted +{} + impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} + impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} + impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} struct Completed { maker_coin: PhantomData, @@ -2299,7 +2295,7 @@ impl Completed { } impl StorableState - for Completed +for Completed { type StateMachine = TakerSwapStateMachine; @@ -2308,7 +2304,7 @@ impl LastState - for Completed +for Completed { type StateMachine = TakerSwapStateMachine; @@ -2321,9 +2317,8 @@ impl - TransitionFrom> for Completed -{ -} +TransitionFrom> for Completed +{} struct TakerFundingRefunded { maker_coin: PhantomData, @@ -2333,7 +2328,7 @@ struct TakerFundingRefunded StorableState - for TakerFundingRefunded +for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2354,7 +2349,7 @@ impl LastState - for TakerFundingRefunded +for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2370,9 +2365,8 @@ impl - TransitionFrom> for TakerFundingRefunded -{ -} +TransitionFrom> for TakerFundingRefunded +{} struct TakerPaymentRefunded { maker_coin: PhantomData, @@ -2382,7 +2376,7 @@ struct TakerPaymentRefunded StorableState - for TakerPaymentRefunded +for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2400,7 +2394,7 @@ impl LastState - for TakerPaymentRefunded +for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2416,6 +2410,5 @@ impl - TransitionFrom> for TakerPaymentRefunded -{ -} +TransitionFrom> for TakerPaymentRefunded +{} From 3480e267e01cb3535ca2523f81d8e0a633f4d433 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 20 Apr 2024 00:55:24 +0100 Subject: [PATCH 055/186] fmt --- .../utxo/utxo_block_header_storage/mod.rs | 6 +- mm2src/mm2_core/src/mm_ctx.rs | 28 +- mm2src/mm2_main/src/database.rs | 19 +- mm2src/mm2_main/src/database/my_orders.rs | 2 +- 
mm2src/mm2_main/src/database/my_swaps.rs | 2 +- mm2src/mm2_main/src/lp_ordermatch.rs | 270 +++++++++--------- .../src/lp_ordermatch/my_orders_storage.rs | 43 +-- mm2src/mm2_main/src/lp_stats.rs | 34 ++- mm2src/mm2_main/src/lp_swap.rs | 141 ++++----- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 187 ++++++------ .../mm2_main/src/lp_swap/my_swaps_storage.rs | 33 +-- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 35 ++- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 23 +- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 241 +++++++++------- 14 files changed, 537 insertions(+), 527 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index cb32984153..ffc3635b35 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -1,11 +1,9 @@ -#[cfg(not(target_arch = "wasm32"))] -mod sql_block_header_storage; +#[cfg(not(target_arch = "wasm32"))] mod sql_block_header_storage; #[cfg(not(target_arch = "wasm32"))] pub use sql_block_header_storage::SqliteBlockHeadersStorage; -#[cfg(target_arch = "wasm32")] -mod wasm; +#[cfg(target_arch = "wasm32")] mod wasm; #[cfg(target_arch = "wasm32")] pub use wasm::IDBBlockHeadersStorage; diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 101c530db0..91471d91ae 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -231,7 +231,7 @@ impl MmCtx { rpcport ) })? - } + }, None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -246,7 +246,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -266,7 +266,7 @@ impl MmCtx { return ERR!("IP address {} must be specified", ip); } Ok(()) - } + }, Ok(ServerName::DnsName(_)) => Ok(()), // NOTE: We need to have this wild card since `ServerName` is a non_exhaustive enum. Ok(_) => ERR!("Only IpAddress and DnsName are allowed in `alt_names`"), @@ -627,7 +627,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. 
Stop the loop"); break; - } + }, } } }; @@ -661,7 +661,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - } + }, } } } @@ -758,8 +758,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -767,8 +767,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -782,9 +782,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> - where - C: FnOnce() -> Result, - T: 'static + Send + Sync, +where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -877,9 +877,9 @@ pub fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - } + }, Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - } + }, } } diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 19184fa1d4..b02e42fd1e 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -2,12 +2,9 @@ /// #[path = "database/my_orders.rs"] pub mod my_orders; -#[path = "database/my_swaps.rs"] -pub mod my_swaps; -#[path = "database/stats_nodes.rs"] -pub mod stats_nodes; -#[path = "database/stats_swaps.rs"] -pub mod stats_swaps; +#[path = "database/my_swaps.rs"] pub mod my_swaps; +#[path = "database/stats_nodes.rs"] pub mod stats_nodes; +#[path = "database/stats_swaps.rs"] pub mod stats_swaps; use crate::CREATE_MY_SWAPS_TABLE; use coins::find_unique_account_ids_any; @@ -39,11 +36,11 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes migrate_sqlite_database(ctx, current_migration).await?; return Ok(()); } - } + }, Err(e) => { debug!("Error '{}' on getting current migration. 
The database is either empty or corrupted, trying to clean it first", e); clean_db(ctx, db_id); - } + }, }; info!("Trying to initialize the SQLite database"); @@ -59,11 +56,11 @@ fn init_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> { let conn = conn.lock().unwrap(); run_optimization_pragmas(&conn)?; let init_batch = concat!( - "BEGIN; + "BEGIN; CREATE TABLE IF NOT EXISTS migration (current_migration INTEGER NOT_NULL UNIQUE); INSERT INTO migration (current_migration) VALUES (1);", - CREATE_MY_SWAPS_TABLE!(), - "COMMIT;" + CREATE_MY_SWAPS_TABLE!(), + "COMMIT;" ); conn.execute_batch(init_batch) } diff --git a/mm2src/mm2_main/src/database/my_orders.rs b/mm2src/mm2_main/src/database/my_orders.rs index b8f388fc3a..fe0fb41e8a 100644 --- a/mm2src/mm2_main/src/database/my_orders.rs +++ b/mm2src/mm2_main/src/database/my_orders.rs @@ -255,7 +255,7 @@ pub fn select_orders_by_filter( query_builder.limit(paging.limit); query_builder.offset(offset); offset - } + }, None => 0, }; diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 5bc998e391..2f8fdaac77 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -242,7 +242,7 @@ pub fn select_uuids_by_my_swaps_filter( query_builder.limit(paging.limit); query_builder.offset(offset); offset - } + }, None => 0, }; diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 6756299ed7..867f0ea301 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -49,8 +49,7 @@ use mm2_number::{BigDecimal, BigRational, MmNumber, MmNumberMultiRepr}; use mm2_rpc::data::legacy::{MatchBy, Mm2RpcResult, OrderConfirmationsSettings, OrderType, RpcOrderbookEntry, SellBuyRequest, SellBuyResponse, TakerAction, TakerRequestForRpc}; use mm2_state_machine::prelude::*; -#[cfg(test)] -use mocktopus::macros::*; +#[cfg(test)] use mocktopus::macros::*; use my_orders_storage::{delete_my_maker_order, delete_my_taker_order, save_maker_order_on_update, save_my_new_maker_order, save_my_new_taker_order, MyActiveOrders, MyOrdersFilteringHistory, MyOrdersHistory, MyOrdersStorage}; @@ -96,24 +95,19 @@ cfg_wasm32! 
{ pub type OrdermatchDbLocked<'a> = DbLocked<'a, OrdermatchDb>; } -#[path = "lp_ordermatch/best_orders.rs"] -mod best_orders; -#[path = "lp_ordermatch/lp_bot.rs"] -mod lp_bot; +#[path = "lp_ordermatch/best_orders.rs"] mod best_orders; +#[path = "lp_ordermatch/lp_bot.rs"] mod lp_bot; pub use lp_bot::{start_simple_market_maker_bot, stop_simple_market_maker_bot, StartSimpleMakerBotRequest, TradingBotEvent}; #[path = "lp_ordermatch/my_orders_storage.rs"] mod my_orders_storage; -#[path = "lp_ordermatch/new_protocol.rs"] -mod new_protocol; +#[path = "lp_ordermatch/new_protocol.rs"] mod new_protocol; #[path = "lp_ordermatch/order_requests_tracker.rs"] mod order_requests_tracker; -#[path = "lp_ordermatch/orderbook_depth.rs"] -mod orderbook_depth; -#[path = "lp_ordermatch/orderbook_rpc.rs"] -mod orderbook_rpc; +#[path = "lp_ordermatch/orderbook_depth.rs"] mod orderbook_depth; +#[path = "lp_ordermatch/orderbook_rpc.rs"] mod orderbook_rpc; #[cfg(all(test, not(target_arch = "wasm32")))] #[path = "ordermatch_tests.rs"] pub mod ordermatch_tests; @@ -159,8 +153,8 @@ pub enum OrderbookP2PHandlerError { P2PRequestError(String), #[display( - fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", - _0 + fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", + _0 )] OrderNotFound(Uuid), @@ -278,7 +272,7 @@ fn process_trie_delta( )), None => { orderbook.remove_order_trie_update(uuid); - } + }, } } @@ -317,13 +311,13 @@ async fn process_orders_keep_alive( P2PRequest::Ordermatch(req), propagated_from_peer.clone(), ) - .await? - .ok_or_else(|| { - MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( - "No response was received from peer {} for SyncPubkeyOrderbookState request!", - propagated_from_peer - ))) - })?; + .await? 
+ .ok_or_else(|| { + MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( + "No response was received from peer {} for SyncPubkeyOrderbookState request!", + propagated_from_peer + ))) + })?; let mut orderbook = ordermatch_ctx.orderbook.lock(); for (pair, diff) in response.pair_orders_diff { @@ -399,13 +393,13 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul let response = try_s!(request_any_relay::(ctx.clone(), P2PRequest::Ordermatch(request)).await); let (pubkey_orders, protocol_infos, conf_infos) = match response { Some(( - GetOrderbookRes { - pubkey_orders, - protocol_infos, - conf_infos, - }, - _peer_id, - )) => (pubkey_orders, protocol_infos, conf_infos), + GetOrderbookRes { + pubkey_orders, + protocol_infos, + conf_infos, + }, + _peer_id, + )) => (pubkey_orders, protocol_infos, conf_infos), None => return Ok(()), }; @@ -421,7 +415,7 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul Err(e) => { warn!("Error {} decoding pubkey {}", e, pubkey); continue; - } + }, }; if is_my_order(&pubkey, &my_pubsecp, &orderbook.my_p2p_pubkeys) { @@ -487,9 +481,9 @@ fn delete_my_order(ctx: &MmArc, uuid: Uuid, p2p_privkey: Option(ctx: &MmArc, err_construct: F) -> MmResult, E> - where - E: NotMmError, - F: Fn(String) -> E, +where + E: NotMmError, + F: Fn(String) -> E, { match CryptoCtx::from_ctx(ctx).split_mm() { Ok(crypto_ctx) => Ok(Some(CryptoCtx::mm2_internal_pubkey_hex(crypto_ctx.as_ref()))), @@ -562,39 +556,39 @@ pub async fn process_msg(ctx: MmArc, from_peer: String, msg: &[u8], i_am_relay: let order: OrderbookItem = (created_msg, hex::encode(pubkey.to_bytes().as_slice())).into(); insert_or_update_order(&ctx, order); Ok(()) - } + }, new_protocol::OrdermatchMessage::PubkeyKeepAlive(keep_alive) => { process_orders_keep_alive(ctx, from_peer, pubkey.to_hex(), keep_alive, i_am_relay).await - } + }, new_protocol::OrdermatchMessage::TakerRequest(taker_request) => { let msg = TakerRequest::from_new_proto_and_pubkey(taker_request, pubkey.unprefixed().into()); process_taker_request(ctx, pubkey.unprefixed().into(), msg).await; Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerReserved(maker_reserved) => { let msg = MakerReserved::from_new_proto_and_pubkey(maker_reserved, pubkey.unprefixed().into()); // spawn because process_maker_reserved may take significant time to run let spawner = ctx.spawner(); spawner.spawn(process_maker_reserved(ctx, pubkey.unprefixed().into(), msg)); Ok(()) - } + }, new_protocol::OrdermatchMessage::TakerConnect(taker_connect) => { process_taker_connect(ctx, pubkey, taker_connect.into()).await; Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerConnected(maker_connected) => { process_maker_connected(ctx, pubkey, maker_connected.into()).await; Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerOrderCancelled(cancelled_msg) => { delete_order(&ctx, &pubkey.to_hex(), cancelled_msg.uuid.into()); Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerOrderUpdated(updated_msg) => { process_maker_order_updated(ctx, pubkey.to_hex(), updated_msg) - } + }, } - } + }, Err(e) => MmError::err(OrderbookP2PHandlerError::DecodeError(e.to_string())), } } @@ -636,8 +630,8 @@ impl From for TryFromBytesError { trait TryFromBytes { fn try_from_bytes(bytes: Vec) -> Result - where - Self: Sized; + where + Self: Sized; } impl TryFromBytes for String { @@ -671,13 +665,13 @@ pub fn process_peer_request(ctx: MmArc, request: OrdermatchRequest) -> Result { let response = process_sync_pubkey_orderbook_state(ctx, pubkey, 
trie_roots); response.map(|res| res.map(|r| encode_message(&r).expect("Serialization failed"))) - } + }, OrdermatchRequest::BestOrders { coin, action, volume } => { best_orders::process_best_orders_p2p_request(ctx, coin, action, volume) - } + }, OrdermatchRequest::BestOrdersByNumber { coin, action, number } => { best_orders::process_best_orders_p2p_request_by_number(ctx, coin, action, number) - } + }, OrdermatchRequest::OrderbookDepth { pairs } => orderbook_depth::process_orderbook_depth_p2p_request(ctx, pairs), } } @@ -743,7 +737,7 @@ fn get_pubkeys_orders(orderbook: &Orderbook, base: String, rel: String) -> GetPu uuid ); continue; - } + }, }; let uuids = uuids_by_pubkey.entry(order.pubkey.clone()).or_insert_with(Vec::new); protocol_infos.insert(order.uuid, order.base_rel_proto_info()); @@ -815,12 +809,12 @@ impl DeltaOrFullTrie { .map(|(key, value)| (key, value.map(From::from))) .collect(); DeltaOrFullTrie::Delta(new_map) - } + }, DeltaOrFullTrie::FullTrie(trie) => { trie.iter().for_each(|(key, val)| on_each(key, Some(val))); let new_trie = trie.into_iter().map(|(key, value)| (key, value.into())).collect(); DeltaOrFullTrie::FullTrie(new_trie) - } + }, } } } @@ -851,8 +845,8 @@ fn get_full_trie( db: &MemoryDB, getter: impl Fn(&Key) -> Option, ) -> Result, TrieDiffHistoryError> - where - Key: Clone + Eq + std::hash::Hash + TryFromBytes, +where + Key: Clone + Eq + std::hash::Hash + TryFromBytes, { let trie = TrieDB::::new(db, trie_root)?; let trie: Result, TrieDiffHistoryError> = trie @@ -931,10 +925,10 @@ fn process_sync_pubkey_orderbook_state( let delta_result = match pubkey_state.order_pairs_trie_state_history.get(&pair) { Some(history) => { DeltaOrFullTrie::from_history(history, root, *actual_pair_root, &orderbook.memory_db, order_getter) - } + }, None => { get_full_trie(actual_pair_root, &orderbook.memory_db, order_getter).map(DeltaOrFullTrie::FullTrie) - } + }, }; let delta = try_s!(delta_result); @@ -957,11 +951,11 @@ fn process_sync_pubkey_orderbook_state( if let Some(ref info) = o.conf_settings { conf_infos.insert(o.uuid, info.clone()); } - } + }, None => { protocol_infos.remove(uuid); conf_infos.remove(uuid); - } + }, }); (pair, new_trie) }) @@ -1011,10 +1005,10 @@ pub fn parse_orderbook_pair_from_topic(topic: &str) -> Option<(&str, &str)> { } else { None } - } + }, None => None, } - } + }, None => None, }, _ => None, @@ -1057,7 +1051,7 @@ fn maker_order_created_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_created' message: {}", e); return; - } + }, }; let item: OrderbookItem = (message, hex::encode(key_pair.public_slice())).into(); insert_or_update_my_order(&ctx, item, order); @@ -1088,7 +1082,7 @@ fn maker_order_updated_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_updated' message: {}", e); return; - } + }, }; process_my_maker_order_updated(&ctx, &message); broadcast_p2p_msg(&ctx, topic, encoded_msg, peer_id); @@ -1135,7 +1129,7 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { Err(e) => { log::warn!("Couldn't handle the 'balance_updated' event: {}", e); return; - } + }, }; let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); @@ -1158,9 +1152,9 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { order.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); continue; } } @@ -1605,12 +1599,12 @@ impl TakerOrder { if !uuids.contains(&reserved.maker_order_uuid) { return MatchReservedResult::NotMatched; 
} - } + }, MatchBy::Pubkeys(pubkeys) => { if !pubkeys.contains(&reserved.sender_pubkey) { return MatchReservedResult::NotMatched; } - } + }, } let my_base_amount = self.request.get_base_amount(); @@ -1628,18 +1622,18 @@ impl TakerOrder { } else { MatchReservedResult::NotMatched } - } + }, TakerAction::Sell => { let match_ticker = (self.request.base == reserved.rel || self.base_orderbook_ticker.as_ref() == Some(&reserved.rel)) && (self.request.rel == reserved.base - || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); + || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); if match_ticker && my_base_amount == other_rel_amount && my_rel_amount <= other_base_amount { MatchReservedResult::Matched } else { MatchReservedResult::NotMatched } - } + }, } } @@ -2047,7 +2041,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - } + }, TakerAction::Sell => { let ticker_match = (self.base == taker.rel || self.base_orderbook_ticker.as_ref() == Some(&taker.rel)) && (self.rel == taker.base || self.rel_orderbook_ticker.as_ref() == Some(&taker.base)); @@ -2066,7 +2060,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - } + }, } } @@ -2147,7 +2141,7 @@ impl From for MakerOrder { rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, } - } + }, } } } @@ -2345,7 +2339,7 @@ fn broadcast_ordermatch_message( Err(e) => { error!("Failed to encode and sign ordermatch message: {}", e); return; - } + }, }; broadcast_p2p_msg(ctx, topic, encoded_msg, peer_id); } @@ -2393,10 +2387,10 @@ impl TrieDiffHistory { while let Some(next_diff) = self.inner.remove(diff.next_root) { diff = next_diff; } - } + }, None => { self.inner.insert(insert_at, diff); - } + }, }; } @@ -2457,7 +2451,7 @@ fn pubkey_state_mut<'a>( RawEntryMut::Vacant(e) => { let state = OrderbookPubkeyState::with_history_timeout(Duration::new(TRIE_STATE_HISTORY_TIMEOUT, 0)); e.insert(from_pubkey.to_string(), state).1 - } + }, } } @@ -2549,7 +2543,7 @@ impl Orderbook { Err(e) => { error!("Error getting {} trie with root {:?}", e, prev_root); return; - } + }, }; let order_bytes = order.trie_state_bytes(); if let Err(e) = pair_trie.insert(order.uuid.as_bytes(), &order_bytes) { @@ -2658,7 +2652,7 @@ impl Orderbook { Err(_) => { error!("Failed to get existing trie with root {:?}", pair_state); return Some(order); - } + }, }; if pubkey_state.order_pairs_trie_state_history.get(&alb_ordered).is_some() { @@ -2908,11 +2902,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.rel); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", maker_order.rel, e); return; - } + }, }; let maker_coin = match lp_coinfind(&ctx, &maker_order.base).await { @@ -2920,11 +2914,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.base); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", maker_order.base, e); return; - } + }, }; let alice = bits256::from(maker_match.request.sender_pubkey.0); let maker_amount = maker_match.reserved.get_base_amount().clone(); @@ -2954,7 +2948,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO my_conf_settings, other_conf_settings, } - } + }, None => AtomicLocktimeVersion::V1, }; let lock_time = lp_atomic_locktime( @@ -2978,7 +2972,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Err(e) => { 
error!("Error {} on secret generation", e); return; - } + }, }; let account_db_id = maker_coin.account_db_id(); @@ -3016,7 +3010,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO .run(Box::new(maker_swap_v2::Initialize::default())) .await .error_log(); - } + }, _ => todo!("implement fallback to the old protocol here"), } } else { @@ -3029,7 +3023,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3069,11 +3063,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", taker_coin_ticker); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", taker_coin_ticker, e); return; - } + }, }; let maker_coin_ticker = taker_order.maker_coin_ticker(); @@ -3082,11 +3076,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", maker_coin_ticker); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", maker_coin_ticker, e); return; - } + }, }; // lp_connected_alice is called only from process_maker_connected, which returns if CryptoCtx is not initialized @@ -3116,7 +3110,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat my_conf_settings, other_conf_settings, } - } + }, None => AtomicLocktimeVersion::V1, }; let locktime = lp_atomic_locktime( @@ -3141,7 +3135,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Err(e) => { error!("Error {} on secret generation", e); return; - } + }, }; let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); match (maker_coin, taker_coin) { @@ -3177,12 +3171,12 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat .run(Box::new(taker_swap_v2::Initialize::default())) .await .error_log(); - } + }, _ => todo!("implement fallback to the old protocol here"), } } else { #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); if let Err(e) = insert_new_swap_to_db( ctx.clone(), @@ -3193,7 +3187,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3212,7 +3206,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat locktime, taker_order.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); run_taker_swap(RunTakerSwapInput::StartNew(taker_swap), ctx).await } @@ -3293,7 +3287,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - } + }, }; let max_vol = match calc_max_maker_vol(&ctx, &base, ¤t_balance, FeeApproxStage::OrderIssue).await { @@ -3302,7 +3296,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - } + }, }; if max_vol < order.available_amount() { order.max_base_vol = order.reserved_amount() + max_vol; @@ -3338,9 +3332,9 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { order.clone(), 
MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); } } } @@ -3558,11 +3552,11 @@ async fn process_maker_reserved(ctx: MmArc, from_pubkey: H256Json, reserved_msg: if (my_order.match_reserved(&reserved_msg) == MatchReservedResult::Matched && my_order.matches.is_empty()) && base_coin.is_coin_protocol_supported(&reserved_msg.base_protocol_info, None, lock_time, false) && rel_coin.is_coin_protocol_supported( - &reserved_msg.rel_protocol_info, - Some(reserved_msg.rel_amount.clone()), - lock_time, - false, - ) + &reserved_msg.rel_protocol_info, + Some(reserved_msg.rel_amount.clone()), + lock_time, + false, + ) { let connect = TakerConnect { sender_pubkey: H256Json::from(our_public_id.bytes), @@ -3620,7 +3614,7 @@ async fn process_maker_connected(ctx: MmArc, from_pubkey: PublicKey, connected: connected.maker_order_uuid ); return; - } + }, }; if order_match.reserved.sender_pubkey != unprefixed_from.into() { @@ -3688,21 +3682,21 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: atomic_locktime_v, ) as f64 * rel_coin.maker_locktime_multiplier()) - .ceil() as u64; + .ceil() as u64; if !order.matches.contains_key(&taker_request.uuid) && base_coin.is_coin_protocol_supported( - taker_request.base_protocol_info_for_maker(), - Some(base_amount.clone()), - maker_lock_duration, - true, - ) + taker_request.base_protocol_info_for_maker(), + Some(base_amount.clone()), + maker_lock_duration, + true, + ) && rel_coin.is_coin_protocol_supported( - taker_request.rel_protocol_info_for_maker(), - None, - maker_lock_duration, - true, - ) + taker_request.rel_protocol_info_for_maker(), + None, + maker_lock_duration, + true, + ) { let reserved = MakerReserved { dest_pub_key: taker_request.sender_pubkey, @@ -3780,7 +3774,7 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg connect_msg.taker_order_uuid ); return; - } + }, }; if order_match.request.sender_pubkey != sender_unprefixed.into() { log::warn!("Connect message sender pubkey != request message sender pubkey"); @@ -4815,7 +4809,7 @@ pub async fn update_maker_order(ctx: &MmArc, req: MakerOrderUpdateReq) -> Result try_s!(validate_price(new_price.clone())); update_msg.with_new_price(new_price.clone().into()); new_price - } + }, None => order_before_update.price.clone(), }; @@ -5089,7 +5083,7 @@ pub async fn orders_history_by_filter(ctx: MmArc, req: Json) -> Result Result (), } @@ -5243,7 +5237,7 @@ pub async fn cancel_order_rpc(ctx: MmArc, req: Json) -> Result> return Response::builder() .body(json::to_vec(&res).expect("Serialization failed")) .map_err(|e| ERRL!("{}", e)); - } + }, // error is returned Entry::Vacant(_) => (), } @@ -5523,7 +5517,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5543,7 +5537,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5563,7 +5557,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec match e.get() { OrderbookRequestingState::Requested => { // We are subscribed to the topic and the orderbook was requested already true - } + }, OrderbookRequestingState::NotRequested { subscribed_at } => { // We are subscribed to the topic. 
Also we didn't request the orderbook, // True if enough time has passed for the orderbook to fill by OrdermatchRequest::SyncPubkeyOrderbookState. *subscribed_at + ORDERBOOK_REQUESTING_TIMEOUT < current_timestamp - } + }, }, } }; @@ -5707,7 +5701,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - } + }, TakerAction::Buy => { let maker_coin_confs = if taker_settings.base_confs < maker_settings.base_confs { taker_settings.base_confs @@ -5725,7 +5719,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - } + }, }, None => ( maker_settings.base_confs, @@ -5825,12 +5819,12 @@ fn orderbook_address( coins::eth::addr_from_pubkey_str(pubkey) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - } + }, CoinProtocol::UTXO | CoinProtocol::QTUM | CoinProtocol::QRC20 { .. } | CoinProtocol::BCH { .. } => { coins::utxo::address_by_conf_and_pubkey_str(coin, conf, pubkey, addr_format) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - } + }, CoinProtocol::SLPTOKEN { platform, .. } => { let platform_conf = coin_conf(ctx, &platform); if platform_conf.is_null() { @@ -5844,12 +5838,12 @@ fn orderbook_address( .mm_err(|e| OrderbookAddrErr::AddrFromPubkeyError(e.to_string())), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(platform)), } - } + }, CoinProtocol::TENDERMINT(protocol) => Ok(coins::tendermint::account_id_from_pubkey_hex( &protocol.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), CoinProtocol::TENDERMINTTOKEN(proto) => { let platform_conf = coin_conf(ctx, &proto.platform); if platform_conf.is_null() { @@ -5862,17 +5856,17 @@ fn orderbook_address( &platform.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(format!( "Platform protocol {:?} is not TENDERMINT", platform_protocol ))), } - } + }, #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA | CoinProtocol::SPLTOKEN { .. } => { MmError::err(OrderbookAddrErr::CoinIsNotSupported(coin.to_owned())) - } + }, CoinProtocol::ZHTLC { .. 
} => Ok(OrderbookAddress::Shielded), #[cfg(not(target_arch = "wasm32"))] // Todo: Shielded address is used for lightning for now, the lightning node public key can be used for the orderbook entry pubkey diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index e568d7f99c..8d159472ed 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -7,8 +7,7 @@ use derive_more::Display; use futures::{FutureExt, TryFutureExt}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; -#[cfg(test)] -use mocktopus::macros::*; +#[cfg(test)] use mocktopus::macros::*; use uuid::Uuid; pub type MyOrdersResult = Result>; @@ -51,7 +50,7 @@ pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrders } pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrdersResult<()> { -// TODO db_id + // TODO db_id let storage = MyOrdersStorage::new(ctx, None); storage .save_new_active_taker_order(order) @@ -97,7 +96,7 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa .await .error_log_with_msg("!save_order_in_history"); } - } + }, } if save_in_history { @@ -228,7 +227,7 @@ mod native_impl { FsJsonError::Serializing(serializing) => MyOrdersError::ErrorSerializing(serializing.to_string()), FsJsonError::Deserializing(deserializing) => { MyOrdersError::ErrorDeserializing(deserializing.to_string()) - } + }, } } } @@ -240,7 +239,12 @@ mod native_impl { } impl MyOrdersStorage { - pub fn new(ctx: MmArc, db_id: Option<&str>) -> MyOrdersStorage { MyOrdersStorage { ctx, db_id: db_id.map(|e| e.to_string()) } } + pub fn new(ctx: MmArc, db_id: Option<&str>) -> MyOrdersStorage { + MyOrdersStorage { + ctx, + db_id: db_id.map(|e| e.to_string()), + } + } } #[async_trait] @@ -331,12 +335,12 @@ mod native_impl { async fn select_order_status(&self, uuid: Uuid) -> MyOrdersResult { let conn = self.ctx.sqlite_connection_v2(self.db_id.as_deref()); let conn = conn.lock().unwrap(); - select_status_by_uuid(&conn, &uuid) - .map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) + select_status_by_uuid(&conn, &uuid).map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) } async fn save_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - insert_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + insert_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()) + .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn save_taker_order_in_filtering_history(&self, order: &TakerOrder) -> MyOrdersResult<()> { @@ -345,15 +349,18 @@ mod native_impl { } async fn update_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - update_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + update_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()) + .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn update_order_status_in_filtering_history(&self, uuid: Uuid, status: String) -> MyOrdersResult<()> { - update_order_status(&self.ctx, uuid, status, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + update_order_status(&self.ctx, uuid, status, self.db_id.as_deref()) + .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn 
update_was_taker_in_filtering_history(&self, uuid: Uuid) -> MyOrdersResult<()> { - update_was_taker(&self.ctx, uuid, self.db_id.as_deref()).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + update_was_taker(&self.ctx, uuid, self.db_id.as_deref()) + .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } } } @@ -797,9 +804,9 @@ mod tests { maker1.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .unwrap(); + .compat() + .await + .unwrap(); let actual_active_maker_orders = storage .load_active_taker_orders() @@ -975,9 +982,9 @@ mod tests { maker_order_to_filtering_history_item(&maker2, "Updated".to_owned(), false).unwrap(), taker_order_to_filtering_history_item(&taker1, "MyCustomStatus".to_owned()).unwrap(), ] - .into_iter() - .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) - .collect(); + .into_iter() + .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) + .collect(); assert_eq!(actual_items, expected_items); diff --git a/mm2src/mm2_main/src/lp_stats.rs b/mm2src/mm2_main/src/lp_stats.rs index 44cfb0231d..ceca75b4df 100644 --- a/mm2src/mm2_main/src/lp_stats.rs +++ b/mm2src/mm2_main/src/lp_stats.rs @@ -94,11 +94,22 @@ fn insert_node_info_to_db(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str> } #[cfg(target_arch = "wasm32")] -fn insert_node_version_stat_to_db(_ctx: &MmArc, _node_version_stat: NodeVersionStat, _db_id: Option<&str>) -> Result<(), String> { Ok(()) } +fn insert_node_version_stat_to_db( + _ctx: &MmArc, + _node_version_stat: NodeVersionStat, + _db_id: Option<&str>, +) -> Result<(), String> { + Ok(()) +} #[cfg(not(target_arch = "wasm32"))] -fn insert_node_version_stat_to_db(ctx: &MmArc, node_version_stat: NodeVersionStat, db_id: Option<&str>) -> Result<(), String> { - crate::mm2::database::stats_nodes::insert_node_version_stat(ctx, node_version_stat, db_id).map_err(|e| e.to_string()) +fn insert_node_version_stat_to_db( + ctx: &MmArc, + node_version_stat: NodeVersionStat, + db_id: Option<&str>, +) -> Result<(), String> { + crate::mm2::database::stats_nodes::insert_node_version_stat(ctx, node_version_stat, db_id) + .map_err(|e| e.to_string()) } #[cfg(target_arch = "wasm32")] @@ -239,7 +250,6 @@ pub async fn start_version_stat_collection(ctx: MmArc, req: Json) -> NodeVersion let interval: f64 = json::from_value(req["interval"].clone())?; - let peers_addresses = select_peers_addresses_from_db(&ctx).map_to_mm(NodeVersionError::DatabaseError)?; let netid = ctx.conf["netid"].as_u64().unwrap_or(0) as u16; @@ -289,11 +299,11 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { StatsCollectionStatus::Updating(i) => { interval = i; *state = StatsCollectionStatus::Running; - } + }, StatsCollectionStatus::Stopping => { *state = StatsCollectionStatus::Stopped; break; - } + }, StatsCollectionStatus::Stopped => *state = StatsCollectionStatus::Running, } } @@ -305,7 +315,7 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { log::error!("Error selecting peers names from db: {}", e); Timer::sleep(10.).await; continue; - } + }, }; let peers: Vec = peers_names.keys().cloned().collect(); @@ -316,14 +326,14 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { P2PRequest::NetworkInfo(NetworkInfoRequest::GetMm2Version), peers, ) - .await + .await { Ok(res) => res, Err(e) => { log::error!("Error getting nodes versions from peers: {}", e); Timer::sleep(10.).await; continue; - } + }, }; for (peer_id, response) in get_versions_res { @@ -344,7 +354,7 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { if let Err(e) = 
insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { log::error!("Error inserting node {} version {} into db: {}", name, v, e); }; - } + }, PeerDecodedResponse::Err(e) => { log::error!( "Node {} responded to version request with error: {}", @@ -361,7 +371,7 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { log::error!("Error inserting node {} error into db: {}", name, e); }; - } + }, PeerDecodedResponse::None => { log::debug!("Node {} did not respond to version request", name.clone()); let node_version_stat = NodeVersionStat { @@ -374,7 +384,7 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { log::error!("Error inserting no response for node {} into db: {}", name, e); }; - } + }, } } } diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 81b0e2ac14..8d7f5c76ed 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -95,41 +95,26 @@ use uuid::Uuid; #[cfg(feature = "custom-swap-locktime")] use std::sync::atomic::{AtomicU64, Ordering}; -#[path = "lp_swap/check_balance.rs"] -mod check_balance; -#[path = "lp_swap/maker_swap.rs"] -mod maker_swap; -#[path = "lp_swap/maker_swap_v2.rs"] -pub mod maker_swap_v2; -#[path = "lp_swap/max_maker_vol_rpc.rs"] -mod max_maker_vol_rpc; -#[path = "lp_swap/my_swaps_storage.rs"] -mod my_swaps_storage; -#[path = "lp_swap/pubkey_banning.rs"] -mod pubkey_banning; -#[path = "lp_swap/recreate_swap_data.rs"] -mod recreate_swap_data; -#[path = "lp_swap/saved_swap.rs"] -mod saved_swap; -#[path = "lp_swap/swap_lock.rs"] -mod swap_lock; +#[path = "lp_swap/check_balance.rs"] mod check_balance; +#[path = "lp_swap/maker_swap.rs"] mod maker_swap; +#[path = "lp_swap/maker_swap_v2.rs"] pub mod maker_swap_v2; +#[path = "lp_swap/max_maker_vol_rpc.rs"] mod max_maker_vol_rpc; +#[path = "lp_swap/my_swaps_storage.rs"] mod my_swaps_storage; +#[path = "lp_swap/pubkey_banning.rs"] mod pubkey_banning; +#[path = "lp_swap/recreate_swap_data.rs"] mod recreate_swap_data; +#[path = "lp_swap/saved_swap.rs"] mod saved_swap; +#[path = "lp_swap/swap_lock.rs"] mod swap_lock; #[path = "lp_swap/komodefi.swap_v2.pb.rs"] #[rustfmt::skip] mod swap_v2_pb; -#[path = "lp_swap/swap_v2_common.rs"] -mod swap_v2_common; -#[path = "lp_swap/swap_v2_rpcs.rs"] -pub(crate) mod swap_v2_rpcs; -#[path = "lp_swap/swap_watcher.rs"] -pub(crate) mod swap_watcher; +#[path = "lp_swap/swap_v2_common.rs"] mod swap_v2_common; +#[path = "lp_swap/swap_v2_rpcs.rs"] pub(crate) mod swap_v2_rpcs; +#[path = "lp_swap/swap_watcher.rs"] pub(crate) mod swap_watcher; #[path = "lp_swap/taker_restart.rs"] pub(crate) mod taker_restart; -#[path = "lp_swap/taker_swap.rs"] -pub(crate) mod taker_swap; -#[path = "lp_swap/taker_swap_v2.rs"] -pub mod taker_swap_v2; -#[path = "lp_swap/trade_preimage.rs"] -mod trade_preimage; +#[path = "lp_swap/taker_swap.rs"] pub(crate) mod taker_swap; +#[path = "lp_swap/taker_swap_v2.rs"] pub mod taker_swap_v2; +#[path = "lp_swap/trade_preimage.rs"] mod trade_preimage; #[cfg(target_arch = "wasm32")] #[path = "lp_swap/swap_wasm_db.rs"] @@ -256,7 +241,7 @@ pub fn p2p_keypair_and_peer_id_to_broadcast(ctx: &MmArc, p2p_privkey: Option<&Ke None => { let crypto_ctx = CryptoCtx::from_ctx(ctx).expect("CryptoCtx must be initialized already"); (*crypto_ctx.mm2_internal_key_pair(), None) - } + }, } } @@ -272,7 +257,7 @@ pub fn p2p_private_and_peer_id_to_broadcast(ctx: 
&MmArc, p2p_privkey: Option<&Ke None => { let crypto_ctx = CryptoCtx::from_ctx(ctx).expect("CryptoCtx must be initialized already"); (crypto_ctx.mm2_internal_privkey_secret().take(), None) - } + }, } } @@ -320,7 +305,7 @@ pub fn broadcast_swap_message(ctx: &MmArc, topic: String, msg: T, Err(e) => { error!("Error encoding and signing swap message: {}", e); return; - } + }, }; broadcast_p2p_msg(ctx, topic, encoded_msg, from); } @@ -337,7 +322,7 @@ pub fn broadcast_p2p_tx_msg(ctx: &MmArc, topic: String, msg: &TransactionEnum, p Err(e) => { error!("Error encoding and signing tx message: {}", e); return; - } + }, }; broadcast_p2p_msg(ctx, topic, encoded_msg, from); } @@ -357,14 +342,14 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest error!("Error saving the swap {} status: {}", status.data.uuid(), e); } Ok(()) - } + }, Err(swap_status_err) => { let error = format!( "Couldn't deserialize swap msg to either 'SwapMsg': {} or to 'SwapStatus': {}", swap_msg_err, swap_status_err ); MmError::err(P2PRequestError::DecodeError(error)) - } + }, }; #[cfg(target_arch = "wasm32")] @@ -372,7 +357,7 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest "Couldn't deserialize 'SwapMsg': {}", swap_msg_err ))); - } + }, }; debug!("Processing swap msg {:?} for uuid {}", msg, uuid); @@ -1150,19 +1135,19 @@ pub async fn my_swap_status(ctx: MmArc, req: Json) -> Result>, let res_js = json!({ "result": MySwapStatusResponse::from(status) }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) - } + }, Some(MAKER_SWAP_V2_TYPE) => { let swap_data = try_s!(get_maker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); let res_js = json!({ "result": swap_data }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) - } + }, Some(TAKER_SWAP_V2_TYPE) => { let swap_data = try_s!(get_taker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); let res_js = json!({ "result": swap_data }); let res = try_s!(json::to_vec(&res_js)); Ok(try_s!(Response::builder().body(res))) - } + }, Some(unsupported_type) => ERR!("Got unsupported swap type from DB: {}", unsupported_type), None => ERR!("No swap with uuid {}", uuid), } @@ -1345,7 +1330,7 @@ pub async fn latest_swaps_for_pair( Ok(None) => { error!("No such swap with the uuid '{}'", uuid); continue; - } + }, Err(e) => return Err(MmError::new(LatestSwapsErr::UnableToLoadSavedSwaps(e.into_inner()))), }; swaps.push(swap); @@ -1376,7 +1361,7 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result { let swap_json = try_s!(json::to_value(MySwapStatusResponse::from(swap))); swaps.push(swap_json) - } + }, Ok(None) => warn!("No such swap with the uuid '{}'", uuid), Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, @@ -1384,14 +1369,14 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); - } + }, Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, TAKER_SWAP_V2_TYPE => match get_taker_swap_data_for_rpc(&ctx, uuid, Some(&db_result.pubkey)).await { Ok(data) => { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); - } + }, Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), }, unknown_type => error!("Swap with the uuid '{}' has unknown type {}", uuid, unknown_type), @@ -1435,11 +1420,11 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Ok(None) => { warn!("Swap {} is 
indexed, but doesn't exist in DB", uuid); continue; - } + }, Err(e) => { error!("Error {} on getting swap {} data from DB", e, uuid); continue; - } + }, }; info!("Kick starting the swap {}", swap.uuid()); let maker_coin_ticker = match swap.maker_coin_ticker() { @@ -1447,14 +1432,14 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Err(e) => { error!("Error {} getting maker coin of swap: {}", e, swap.uuid()); continue; - } + }, }; let taker_coin_ticker = match swap.taker_coin_ticker() { Ok(t) => t, Err(e) => { error!("Error {} getting taker coin of swap {}", e, swap.uuid()); continue; - } + }, }; coins.insert(maker_coin_ticker.clone()); coins.insert(taker_coin_ticker.clone()); @@ -1472,7 +1457,7 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Err(e) => { error!("Error {} getting DB repr of maker swap {}", e, maker_uuid); continue; - } + }, }; debug!("Got maker swap repr {:?}", maker_swap_repr); @@ -1497,7 +1482,7 @@ pub async fn swap_kick_starts(ctx: MmArc) -> Result, String> { Err(e) => { error!("Error {} getting DB repr of taker swap {}", e, taker_uuid); continue; - } + }, }; debug!("Got taker swap repr {:?}", taker_swap_repr); @@ -1528,11 +1513,11 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker taker_coin_ticker ); Timer::sleep(5.).await; - } + }, Err(e) => { error!("Error {} on {} find attempt", e, taker_coin_ticker); return; - } + }, }; }; @@ -1546,11 +1531,11 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker maker_coin_ticker ); Timer::sleep(5.).await; - } + }, Err(e) => { error!("Error {} on {} find attempt", e, maker_coin_ticker); return; - } + }, }; }; match swap { @@ -1563,8 +1548,8 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker }, ctx, ) - .await; - } + .await; + }, SavedSwap::Taker(saved_swap) => { run_taker_swap( RunTakerSwapInput::KickStart { @@ -1574,8 +1559,8 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker }, ctx, ) - .await; - } + .await; + }, } } @@ -1624,16 +1609,16 @@ pub async fn import_swaps(ctx: MmArc, req: Json) -> Result>, St LEGACY_SWAP_TYPE, accound_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } } imported.push(swap.uuid().to_owned()); - } + }, Err(e) => { skipped.insert(swap.uuid().to_owned(), ERRL!("{}", e)); - } + }, } } let res = try_s!(json::to_vec(&json!({ @@ -1673,14 +1658,14 @@ pub async fn active_swaps_rpc(ctx: MmArc, req: Json) -> Result> Err(e) => { error!("Error on loading_from_db: {}", e); continue; - } + }, }; map.insert(*uuid, status); - } + }, unsupported_type => { error!("active_swaps_rpc doesn't support swap type {}", unsupported_type); continue; - } + }, } } Some(map) @@ -1744,7 +1729,7 @@ pub fn detect_secret_hash_algo(maker_coin: &MmCoinEnum, taker_coin: &MmCoinEnum) match (maker_coin, taker_coin) { (MmCoinEnum::Tendermint(_) | MmCoinEnum::TendermintToken(_) | MmCoinEnum::LightningCoin(_), _) => { SecretHashAlgo::SHA256 - } + }, // If taker is lightning coin the SHA256 of the secret will be sent as part of the maker signed invoice (_, MmCoinEnum::Tendermint(_) | MmCoinEnum::TendermintToken(_)) => SecretHashAlgo::SHA256, (_, _) => SecretHashAlgo::DHASH160, @@ -1855,19 +1840,19 @@ pub fn process_swap_v2_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PProcessRes match swap_message.inner { Some(swap_message::Inner::MakerNegotiation(maker_negotiation)) => { msg_store.maker_negotiation = Some(maker_negotiation) - } + }, 
Some(swap_message::Inner::TakerNegotiation(taker_negotiation)) => { msg_store.taker_negotiation = Some(taker_negotiation) - } + }, Some(swap_message::Inner::MakerNegotiated(maker_negotiated)) => { msg_store.maker_negotiated = Some(maker_negotiated) - } + }, Some(swap_message::Inner::TakerFundingInfo(taker_funding)) => msg_store.taker_funding = Some(taker_funding), Some(swap_message::Inner::MakerPaymentInfo(maker_payment)) => msg_store.maker_payment = Some(maker_payment), Some(swap_message::Inner::TakerPaymentInfo(taker_payment)) => msg_store.taker_payment = Some(taker_payment), Some(swap_message::Inner::TakerPaymentSpendPreimage(preimage)) => { msg_store.taker_payment_spend_preimage = Some(preimage) - } + }, None => return MmError::err(P2PProcessError::DecodeError("swap_message.inner is None".into())), } } @@ -2355,7 +2340,7 @@ mod lp_swap_tests { &rick_activation_params, maker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); println!("Maker address {}", rick_maker.my_address().unwrap()); @@ -2366,7 +2351,7 @@ mod lp_swap_tests { &morty_activation_params, maker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); let taker_ctx_conf = json!({ "netid": 1234, @@ -2390,7 +2375,7 @@ mod lp_swap_tests { &rick_activation_params, taker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); let morty_taker = block_on(utxo_standard_coin_with_priv_key( &taker_ctx, @@ -2399,7 +2384,7 @@ mod lp_swap_tests { &morty_activation_params, taker_key_pair.private().secret, )) - .unwrap(); + .unwrap(); println!("Taker address {}", rick_taker.my_address().unwrap()); @@ -2432,7 +2417,7 @@ mod lp_swap_tests { maker_swap.fail_at = maker_fail_at; #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(taker_swap::FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(taker_swap::FailAt::from).ok(); let taker_swap = TakerSwap::new( taker_ctx.clone(), @@ -2448,7 +2433,7 @@ mod lp_swap_tests { lock_duration, None, #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); block_on(futures::future::join( @@ -2494,7 +2479,7 @@ mod lp_swap_tests { DexFee::Standard(t) => t, DexFee::WithBurn { .. } => { panic!("Wrong variant returned for MYCOIN from `dex_fee_amount_from_taker_coin`.") - } + }, }; let expected_mycoin_taker_fee = &kmd_taker_fee / &MmNumber::from("0.75"); @@ -2513,7 +2498,7 @@ mod lp_swap_tests { DexFee::Standard(t) => t, DexFee::WithBurn { .. } => { panic!("Wrong variant returned for MYCOIN from `dex_fee_amount_from_taker_coin`.") - } + }, }; let testcoin = coins::TestCoin::default(); @@ -2521,7 +2506,7 @@ mod lp_swap_tests { DexFee::Standard(t) => t, DexFee::WithBurn { .. } => { panic!("Wrong variant returned for TEST coin from `dex_fee_amount_from_taker_coin`.") - } + }, }; assert_eq!(testcoin_taker_fee * MmNumber::from("0.90"), mycoin_taker_fee); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index 18c46849d6..c423acb1f7 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -43,8 +43,7 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] -use prost::Message; +#[allow(unused_imports)] use prost::Message; /// Negotiation data representation to be stored in DB. 
#[derive(Clone, Debug, Deserialize, Serialize)] @@ -181,7 +180,7 @@ impl StateMachineStorage for MakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -227,7 +226,7 @@ impl StateMachineStorage for MakerSwapStorage { MakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -399,7 +398,7 @@ pub struct MakerSwapStateMachine -MakerSwapStateMachine + MakerSwapStateMachine { /// Timeout for taker payment's on-chain confirmation. #[inline] @@ -424,7 +423,7 @@ MakerSwapStateMachine #[async_trait] impl StorableStateMachine -for MakerSwapStateMachine + for MakerSwapStateMachine { type Storage = MakerSwapStorage; type Result = (); @@ -464,12 +463,12 @@ for MakerSwapStateMachine storage: MakerSwapStorage, mut repr: MakerSwapDbRepr, recreate_ctx: Self::RecreateCtx, - ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { MakerSwapEvent::Initialized { maker_coin_start_block, @@ -622,7 +621,7 @@ for MakerSwapStateMachine MakerSwapEvent::Completed => return MmError::err(SwapRecreateError::SwapCompleted), MakerSwapEvent::MakerPaymentRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - } + }, }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -711,7 +710,7 @@ for MakerSwapStateMachine .entry(maker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, MakerSwapEvent::MakerPaymentSentFundingSpendGenerated { .. } => { let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) .expect("from_ctx should not fail at this point"); @@ -719,7 +718,7 @@ for MakerSwapStateMachine if let Some(maker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { maker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); }; - } + }, MakerSwapEvent::WaitingForTakerFunding { .. } | MakerSwapEvent::TakerFundingReceived { .. } | MakerSwapEvent::MakerPaymentRefundRequired { .. } @@ -766,7 +765,7 @@ for MakerSwapStateMachine .entry(maker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, MakerSwapEvent::MakerPaymentSentFundingSpendGenerated { .. } | MakerSwapEvent::MakerPaymentRefundRequired { .. } | MakerSwapEvent::MakerPaymentRefunded { .. 
} @@ -794,14 +793,14 @@ impl Default for Initialize { } impl InitialState -for Initialize + for Initialize { type StateMachine = MakerSwapStateMachine; } #[async_trait] impl State -for Initialize + for Initialize { type StateMachine = MakerSwapStateMachine; @@ -811,7 +810,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -819,7 +818,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let preimage_value = TradePreimageValue::Exact(state_machine.maker_volume.to_decimal()); @@ -833,7 +832,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_payment_spend_trade_fee = match state_machine.taker_coin.get_receiver_trade_fee(stage).compat().await @@ -842,7 +841,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let prepared_params = MakerSwapPreparedParams { @@ -859,7 +858,7 @@ for Initialize Some(prepared_params), FeeApproxStage::StartSwap, ) - .await + .await { let reason = AbortReason::BalanceCheckFailure(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; @@ -890,7 +889,7 @@ struct Initialized { impl TransitionFrom> for Initialized {} impl StorableState -for Initialized + for Initialized { type StateMachine = MakerSwapStateMachine; @@ -906,7 +905,7 @@ for Initialized #[async_trait] impl State -for Initialized + for Initialized { type StateMachine = MakerSwapStateMachine; @@ -947,7 +946,7 @@ for Initialized Err(e) => { let reason = AbortReason::DidNotReceiveTakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; drop(abort_handle); @@ -957,11 +956,11 @@ for Initialized Some(taker_negotiation::Action::Abort(abort)) => { let reason = AbortReason::TakerAbortedNegotiation(abort.reason); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, None => { let reason = AbortReason::ReceivedInvalidTakerNegotiation; return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let started_at_diff = state_machine.started_at.abs_diff(taker_data.started_at); @@ -988,7 +987,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let maker_coin_htlc_pub_from_taker = @@ -997,7 +996,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = WaitingForTakerFunding { @@ -1070,12 +1069,13 @@ struct WaitingForTakerFunding TransitionFrom> -for WaitingForTakerFunding -{} + for WaitingForTakerFunding +{ +} #[async_trait] impl State -for WaitingForTakerFunding + for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1108,7 +1108,7 @@ for WaitingForTakerFunding Err(e) => { let reason = AbortReason::DidNotReceiveTakerFundingInfo(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; 
drop(abort_handle); @@ -1118,7 +1118,7 @@ for WaitingForTakerFunding Err(e) => { let reason = AbortReason::FailedToParseTakerFunding(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = TakerFundingReceived { maker_coin_start_block: self.maker_coin_start_block, @@ -1132,7 +1132,7 @@ for WaitingForTakerFunding } impl StorableState -for WaitingForTakerFunding + for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1155,12 +1155,13 @@ struct TakerFundingReceived -TransitionFrom> for TakerFundingReceived -{} + TransitionFrom> for TakerFundingReceived +{ +} #[async_trait] impl State -for TakerFundingReceived + for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1201,7 +1202,7 @@ for TakerFundingReceived Err(e) => { let reason = AbortReason::FailedToGenerateFundingSpend(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let args = SendMakerPaymentArgs { @@ -1217,7 +1218,7 @@ for TakerFundingReceived Err(e) => { let reason = AbortReason::FailedToSendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; info!( "Sent maker payment {} tx {:02x} during swap {}", @@ -1239,7 +1240,7 @@ for TakerFundingReceived } impl StorableState -for TakerFundingReceived + for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1267,13 +1268,14 @@ struct MakerPaymentSentFundingSpendGenerated -TransitionFrom> -for MakerPaymentSentFundingSpendGenerated -{} + TransitionFrom> + for MakerPaymentSentFundingSpendGenerated +{ +} #[async_trait] impl State -for MakerPaymentSentFundingSpendGenerated + for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1330,7 +1332,7 @@ for MakerPaymentSentFundingSpendGenerated negotiation_data: self.negotiation_data, }; break Self::change_state(next_state, state_machine).await; - } + }, // it's not really possible as taker's funding time lock is 3 * lock_duration, though we have to // handle this case anyway Ok(Some(FundingTxSpend::RefundedTimelock(_))) => { @@ -1343,7 +1345,7 @@ for MakerPaymentSentFundingSpendGenerated }; break Self::change_state(next_state, state_machine).await; - } + }, Ok(Some(FundingTxSpend::RefundedSecret { secret, tx: _ })) => { let next_state = MakerPaymentRefundRequired { maker_coin_start_block: self.maker_coin_start_block, @@ -1354,15 +1356,15 @@ for MakerPaymentSentFundingSpendGenerated }; break Self::change_state(next_state, state_machine).await; - } + }, Ok(None) => { Timer::sleep(30.).await; - } + }, Err(e) => match e { SearchForFundingSpendErr::Rpc(e) => { error!("Rpc error {} on search_for_taker_funding_spend", e); Timer::sleep(30.).await; - } + }, // Other error cases are considered irrecoverable, so we should proceed to refund stage // handling using @ binding to trigger a compiler error when new variant is added e @ SearchForFundingSpendErr::InvalidInputTx(_) @@ -1377,7 +1379,7 @@ for MakerPaymentSentFundingSpendGenerated }; break Self::change_state(next_state, state_machine).await; - } + }, }, } } @@ -1385,7 +1387,7 @@ for MakerPaymentSentFundingSpendGenerated } impl StorableState -for MakerPaymentSentFundingSpendGenerated + for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1435,17 +1437,19 @@ struct MakerPaymentRefundRequired -TransitionFrom> -for MakerPaymentRefundRequired -{} + TransitionFrom> + for MakerPaymentRefundRequired +{ +} impl 
-TransitionFrom> for MakerPaymentRefundRequired -{} + TransitionFrom> for MakerPaymentRefundRequired +{ +} #[async_trait] impl State -for MakerPaymentRefundRequired + for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1471,7 +1475,7 @@ for MakerPaymentRefundRequired Err(e) => { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = MakerPaymentRefunded { @@ -1496,7 +1500,7 @@ for MakerPaymentRefundRequired Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - } + }, } } @@ -1526,7 +1530,7 @@ for MakerPaymentRefundRequired Err(e) => { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = MakerPaymentRefunded { @@ -1541,7 +1545,7 @@ for MakerPaymentRefundRequired } impl StorableState -for MakerPaymentRefundRequired + for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1568,13 +1572,14 @@ struct TakerPaymentReceived -TransitionFrom> -for TakerPaymentReceived -{} + TransitionFrom> + for TakerPaymentReceived +{ +} #[async_trait] impl State -for TakerPaymentReceived + for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1615,7 +1620,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::DidNotGetTakerPaymentSpendPreimage(e), }; return Self::change_state(next_state, state_machine).await; - } + }, }; debug!("Received taker payment spend preimage message {:?}", preimage_data); @@ -1645,7 +1650,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::FailedToParseTakerPreimage(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let signature = match state_machine.taker_coin.parse_signature(&preimage_data.signature) { Ok(s) => s, @@ -1658,7 +1663,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::FailedToParseTakerSignature(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let tx_preimage = TxPreimageWithSig { preimage, signature }; @@ -1697,7 +1702,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::TakerPaymentSpendBroadcastFailed(format!("{:?}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( "Spent taker payment {} tx {:02x} during swap {}", @@ -1717,7 +1722,7 @@ for TakerPaymentReceived } impl StorableState -for TakerPaymentReceived + for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1747,12 +1752,13 @@ struct TakerPaymentSpent -TransitionFrom> for TakerPaymentSpent -{} + TransitionFrom> for TakerPaymentSpent +{ +} #[async_trait] impl State -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1762,7 +1768,7 @@ for TakerPaymentSpent } impl StorableState -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1827,7 +1833,7 @@ impl Aborted { #[async_trait] impl LastState -for Aborted + for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1840,7 +1846,7 @@ for Aborted } impl StorableState -for Aborted + for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1856,16 +1862,19 @@ impl TransitionFrom> for impl TransitionFrom> for Aborted {} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + 
TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} struct Completed { maker_coin: PhantomData, @@ -1882,7 +1891,7 @@ impl Completed { } impl StorableState -for Completed + for Completed { type StateMachine = MakerSwapStateMachine; @@ -1891,7 +1900,7 @@ for Completed #[async_trait] impl LastState -for Completed + for Completed { type StateMachine = MakerSwapStateMachine; @@ -1904,8 +1913,9 @@ for Completed } impl -TransitionFrom> for Completed -{} + TransitionFrom> for Completed +{ +} struct MakerPaymentRefunded { taker_coin: PhantomData, @@ -1915,7 +1925,7 @@ struct MakerPaymentRefunded { } impl StorableState -for MakerPaymentRefunded + for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1936,7 +1946,7 @@ for MakerPaymentRefunded #[async_trait] impl LastState -for MakerPaymentRefunded + for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1952,5 +1962,6 @@ for MakerPaymentRefunded } impl -TransitionFrom> for MakerPaymentRefunded -{} + TransitionFrom> for MakerPaymentRefunded +{ +} diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index f55847a581..4b07d97710 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -106,12 +106,7 @@ mod native_impl { ) -> MySwapsResult { let conn = self.ctx.sqlite_connection_v2(Some(db_id)); let conn = conn.lock().unwrap(); - Ok(select_uuids_by_my_swaps_filter( - &conn, - filter, - paging_options, - db_id, - )?) + Ok(select_uuids_by_my_swaps_filter(&conn, filter, paging_options, db_id)?) } } } @@ -230,7 +225,7 @@ mod wasm_impl { .await? .collect() .await? - } + }, (Some(my_coin), None) => { my_swaps_table .cursor_builder() @@ -240,7 +235,7 @@ mod wasm_impl { .await? .collect() .await? - } + }, (None, Some(other_coin)) => { my_swaps_table .cursor_builder() @@ -250,7 +245,7 @@ mod wasm_impl { .await? .collect() .await? - } + }, (None, None) => { my_swaps_table .cursor_builder() @@ -259,7 +254,7 @@ mod wasm_impl { .await? .collect() .await? - } + }, }; let uuids: BTreeSet = items @@ -279,7 +274,7 @@ mod wasm_impl { skipped: 0, pubkey: db_id.to_string(), }) - } + }, } } } @@ -299,7 +294,7 @@ mod wasm_impl { .position(|ordered_uuid| ordered_uuid.uuid == expected_uuid) .or_mm_err(|| MySwapsError::FromUuidNotFound(expected_uuid))? + 1 - } + }, None => (paging.page_number.get() - 1) * paging.limit, }; @@ -440,13 +435,13 @@ mod wasm_tests { (7, "c52659d7-4e13-41f5-9c1a-30cc2f646033", MAKER_SWAP_V2_TYPE), (8, "af5e0383-97f6-4408-8c03-a8eb8d17e46d", LEGACY_SWAP_TYPE), ] - .iter() - .map(|(started_at, uuid, swap_type)| OrderedUuid { - started_at: *started_at, - uuid: Uuid::parse_str(uuid).unwrap(), - swap_type: *swap_type, - }) - .collect(); + .iter() + .map(|(started_at, uuid, swap_type)| OrderedUuid { + started_at: *started_at, + uuid: Uuid::parse_str(uuid).unwrap(), + swap_type: *swap_type, + }) + .collect(); let paging = PagingOptions { limit: 2, diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 074927a889..ddfaeaef29 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -110,7 +110,8 @@ pub(super) async fn has_db_record_for( let conn = ctx.sqlite_connection_v2(db_id.as_deref()); let conn = conn.lock().unwrap(); does_swap_exist(&conn, &id_str, db_id.as_deref()) - }).await?) + }) + .await?) 
} #[cfg(target_arch = "wasm32")] @@ -134,8 +135,8 @@ pub(super) async fn store_swap_event( event: T::Event, db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> - where - T::Event: DeserializeOwned + Serialize + Send + 'static, +where + T::Event: DeserializeOwned + Serialize + Send + 'static, { let id_str = id.to_string(); let db_id = db_id.map(|e| e.to_string()); @@ -153,7 +154,7 @@ pub(super) async fn store_swap_event( update_swap_events(&conn, &id_str, &serialized_events, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -217,7 +218,7 @@ pub(super) async fn get_unfinished_swaps_uuids( select_unfinished_swaps_uuids(&conn, swap_type, db_id.as_deref()) .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -248,13 +249,9 @@ pub(super) async fn mark_swap_as_finished( async_blocking(move || { let conn = ctx.sqlite_connection_v2(db_id.as_deref()); let conn = conn.lock().unwrap(); - Ok(set_swap_is_finished( - &conn, - &id.to_string(), - db_id.as_deref(), - )?) + Ok(set_swap_is_finished(&conn, &id.to_string(), db_id.as_deref())?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -318,7 +315,7 @@ pub(super) async fn acquire_reentrancy_lock_impl(ctx: &MmArc, uuid: Uuid) -> MmR attempts += 1; Timer::sleep(40.).await; } - } + }, } } } @@ -345,7 +342,7 @@ pub(super) trait GetSwapCoins { /// Generic function for upgraded swaps kickstart handling. /// It is implemented only for UtxoStandardCoin/UtxoStandardCoin case temporary. pub(super) async fn swap_kickstart_handler< - T: StorableStateMachine>, + T: StorableStateMachine>, >( ctx: MmArc, swap_repr: ::DbRepr, @@ -368,11 +365,11 @@ pub(super) async fn swap_kickstart_handler< uuid, taker_coin_ticker, ); Timer::sleep(1.).await; - } + }, Err(e) => { error!("Error {} on {} find attempt", e, taker_coin_ticker); return; - } + }, }; }; @@ -387,11 +384,11 @@ pub(super) async fn swap_kickstart_handler< uuid, maker_coin_ticker, ); Timer::sleep(1.).await; - } + }, Err(e) => { error!("Error {} on {} find attempt", e, maker_coin_ticker); return; - } + }, }; }; @@ -403,7 +400,7 @@ pub(super) async fn swap_kickstart_handler< maker_coin_ticker, taker_coin_ticker ); return; - } + }, }; let recreate_context = SwapRecreateCtx { maker_coin, taker_coin }; @@ -413,7 +410,7 @@ pub(super) async fn swap_kickstart_handler< Err(e) => { error!("Error {} on trying to recreate the swap {}", e, uuid); return; - } + }, }; if let Err(e) = state_machine.kickstart(state).await { diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index 0661de8de4..eb007afbad 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -44,15 +44,12 @@ pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid, db_id: Option<&str>) const SELECT_SWAP_TYPE_BY_UUID: &str = "SELECT swap_type FROM my_swaps WHERE uuid = :uuid;"; let conn = ctx.sqlite_connection_v2(db_id.as_deref()); let conn = conn.lock().unwrap(); - let maybe_swap_type = query_single_row( - &conn, - SELECT_SWAP_TYPE_BY_UUID, - &[(":uuid", uuid.as_str())], - |row| row.get(0), - )?; + let maybe_swap_type = query_single_row(&conn, SELECT_SWAP_TYPE_BY_UUID, &[(":uuid", uuid.as_str())], |row| { + row.get(0) + })?; Ok(maybe_swap_type) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -192,7 +189,7 @@ async fn get_swap_data_for_rpc_impl( )?; Ok(swap_data) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -319,15 +316,15 
@@ async fn get_swap_data_by_uuid_and_type( SavedSwap::Maker(m) => SwapRpcData::MakerV1(m), SavedSwap::Taker(t) => SwapRpcData::TakerV1(t), })) - } + }, MAKER_SWAP_V2_TYPE => { let data = get_maker_swap_data_for_rpc(ctx, &uuid, db_id).await?; Ok(data.map(SwapRpcData::MakerV2)) - } + }, TAKER_SWAP_V2_TYPE => { let data = get_taker_swap_data_for_rpc(ctx, &uuid, db_id).await?; Ok(data.map(SwapRpcData::TakerV2)) - } + }, unsupported => MmError::err(GetSwapDataErr::UnsupportedSwapType(unsupported)), } } @@ -371,7 +368,7 @@ impl HttpStatusCode for MySwapStatusError { MySwapStatusError::NoSwapWithUuid(_) => StatusCode::BAD_REQUEST, MySwapStatusError::DbError(_) | MySwapStatusError::UnsupportedSwapType(_) => { StatusCode::INTERNAL_SERVER_ERROR - } + }, } } } @@ -522,7 +519,7 @@ pub(crate) async fn active_swaps_rpc( match get_swap_data_by_uuid_and_type(&ctx, None, *uuid, *swap_type).await { Ok(Some(data)) => { statuses.insert(*uuid, data); - } + }, Ok(None) => warn!("Swap {} data doesn't exist in DB", uuid), Err(e) => error!("Error {} while trying to get swap {} data", e, uuid), } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 3abe896415..c0b4d957cc 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -44,8 +44,7 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] -use prost::Message; +#[allow(unused_imports)] use prost::Message; /// Negotiation data representation to be stored in DB. #[derive(Clone, Debug, Deserialize, Serialize)] @@ -213,7 +212,7 @@ impl StateMachineStorage for TakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -259,7 +258,7 @@ impl StateMachineStorage for TakerSwapStorage { TakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -433,7 +432,7 @@ pub struct TakerSwapStateMachine -TakerSwapStateMachine + TakerSwapStateMachine { fn maker_payment_conf_timeout(&self) -> u64 { self.started_at + self.lock_duration / 3 } @@ -454,7 +453,7 @@ TakerSwapStateMachine #[async_trait] impl StorableStateMachine -for TakerSwapStateMachine + for TakerSwapStateMachine { type Storage = TakerSwapStorage; type Result = (); @@ -494,12 +493,12 @@ for TakerSwapStateMachine storage: TakerSwapStorage, mut repr: TakerSwapDbRepr, recreate_ctx: Self::RecreateCtx, - ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { TakerSwapEvent::Initialized { maker_coin_start_block, @@ -735,10 +734,10 @@ for TakerSwapStateMachine TakerSwapEvent::Completed => return MmError::err(SwapRecreateError::SwapCompleted), TakerSwapEvent::TakerFundingRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - } + }, TakerSwapEvent::TakerPaymentRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - } + }, }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -827,7 +826,7 @@ for TakerSwapStateMachine .entry(taker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, TakerSwapEvent::TakerFundingSent { .. 
} => { let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) .expect("from_ctx should not fail at this point"); @@ -835,7 +834,7 @@ for TakerSwapStateMachine if let Some(taker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { taker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); }; - } + }, TakerSwapEvent::Negotiated { .. } | TakerSwapEvent::TakerFundingRefundRequired { .. } | TakerSwapEvent::MakerPaymentAndFundingSpendPreimgReceived { .. } @@ -876,7 +875,7 @@ for TakerSwapStateMachine .entry(taker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, TakerSwapEvent::TakerFundingSent { .. } | TakerSwapEvent::TakerFundingRefundRequired { .. } | TakerSwapEvent::MakerPaymentAndFundingSpendPreimgReceived { .. } @@ -909,14 +908,14 @@ impl Default for Initialize { } impl InitialState -for Initialize + for Initialize { type StateMachine = TakerSwapStateMachine; } #[async_trait] impl State -for Initialize + for Initialize { type StateMachine = TakerSwapStateMachine; @@ -926,7 +925,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -934,7 +933,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let total_payment_value = @@ -951,7 +950,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let maker_payment_spend_fee = match state_machine.maker_coin.get_receiver_trade_fee(stage).compat().await { @@ -959,7 +958,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let prepared_params = TakerSwapPreparedParams { @@ -982,7 +981,7 @@ for Initialize Some(prepared_params), FeeApproxStage::StartSwap, ) - .await + .await { let reason = AbortReason::BalanceCheckFailure(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; @@ -1013,7 +1012,7 @@ struct Initialized { impl TransitionFrom> for Initialized {} impl StorableState -for Initialized + for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1029,7 +1028,7 @@ for Initialized #[async_trait] impl State -for Initialized + for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1046,7 +1045,7 @@ for Initialized Err(e) => { let reason = AbortReason::DidNotReceiveMakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; debug!("Received maker negotiation message {:?}", maker_negotiation); @@ -1076,7 +1075,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_htlc_pub_from_maker = match state_machine @@ -1087,7 +1086,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_maker_address = match state_machine @@ -1098,7 +1097,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParseAddress(e.to_string()); return 
Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let unique_data = state_machine.unique_data(); @@ -1139,7 +1138,7 @@ for Initialized Err(e) => { let reason = AbortReason::DidNotReceiveMakerNegotiated(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; drop(abort_handle); @@ -1223,12 +1222,13 @@ struct Negotiated TransitionFrom> -for Negotiated -{} + for Negotiated +{ +} #[async_trait] impl State -for Negotiated + for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1248,7 +1248,7 @@ for Negotiated Err(e) => { let reason = AbortReason::FailedToSendTakerFunding(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; info!( @@ -1269,7 +1269,7 @@ for Negotiated } impl StorableState -for Negotiated + for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1293,7 +1293,7 @@ struct TakerFundingSent State -for TakerFundingSent + for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1333,7 +1333,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::DidNotReceiveMakerPayment(e), }; return Self::change_state(next_state, state_machine).await; - } + }, }; drop(abort_handle); @@ -1350,7 +1350,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::FailedToParseMakerPayment(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let preimage_tx = match state_machine @@ -1367,7 +1367,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::FailedToParseFundingSpendPreimg(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let preimage_sig = match state_machine @@ -1384,7 +1384,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::FailedToParseFundingSpendSig(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let next_state = MakerPaymentAndFundingSpendPreimgReceived { @@ -1403,11 +1403,12 @@ for TakerFundingSent } impl TransitionFrom> -for TakerFundingSent -{} + for TakerFundingSent +{ +} impl StorableState -for TakerFundingSent + for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1434,12 +1435,13 @@ struct MakerPaymentAndFundingSpendPreimgReceived -TransitionFrom> -for MakerPaymentAndFundingSpendPreimgReceived -{} + TransitionFrom> + for MakerPaymentAndFundingSpendPreimgReceived +{ +} impl StorableState -for MakerPaymentAndFundingSpendPreimgReceived + for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1466,7 +1468,7 @@ for MakerPaymentAndFundingSpendPreimgReceived #[async_trait] impl State -for MakerPaymentAndFundingSpendPreimgReceived + for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1576,7 +1578,7 @@ for MakerPaymentAndFundingSpendPreimgReceived reason: TakerFundingRefundReason::FailedToSendTakerPayment(format!("{:?}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( @@ -1607,17 +1609,19 @@ struct TakerPaymentSent -TransitionFrom> for TakerPaymentSent -{} + TransitionFrom> for TakerPaymentSent +{ +} impl -TransitionFrom> -for TakerPaymentSent -{} + TransitionFrom> + for TakerPaymentSent +{ +} #[async_trait] impl State -for TakerPaymentSent + for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1669,7 +1673,7 @@ for TakerPaymentSent reason: TakerPaymentRefundReason::FailedToGenerateSpendPreimage(e.to_string()), }; return Self::change_state(next_state, 
state_machine).await; - } + }, }; let preimage_msg = TakerPaymentSpendPreimage { @@ -1706,7 +1710,7 @@ for TakerPaymentSent reason: TakerPaymentRefundReason::MakerDidNotSpendInTime(format!("{}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( "Found taker payment spend {} tx {:02x} during swap {}", @@ -1728,7 +1732,7 @@ for TakerPaymentSent } impl StorableState -for TakerPaymentSent + for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1771,21 +1775,24 @@ struct TakerFundingRefundRequired -TransitionFrom> for TakerFundingRefundRequired -{} + TransitionFrom> for TakerFundingRefundRequired +{ +} impl -TransitionFrom> -for TakerFundingRefundRequired -{} + TransitionFrom> + for TakerFundingRefundRequired +{ +} impl -TransitionFrom> for TakerFundingRefundRequired -{} + TransitionFrom> for TakerFundingRefundRequired +{ +} #[async_trait] impl State -for TakerFundingRefundRequired + for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1814,7 +1821,7 @@ for TakerFundingRefundRequired Err(e) => { let reason = AbortReason::TakerFundingRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = TakerFundingRefunded { @@ -1828,7 +1835,7 @@ for TakerFundingRefundRequired } impl StorableState -for TakerFundingRefundRequired + for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1860,16 +1867,18 @@ struct TakerPaymentRefundRequired -TransitionFrom> for TakerPaymentRefundRequired -{} + TransitionFrom> for TakerPaymentRefundRequired +{ +} impl -TransitionFrom> for TakerPaymentRefundRequired -{} + TransitionFrom> for TakerPaymentRefundRequired +{ +} #[async_trait] impl State -for TakerPaymentRefundRequired + for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1891,7 +1900,7 @@ for TakerPaymentRefundRequired Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - } + }, } } @@ -1916,7 +1925,7 @@ for TakerPaymentRefundRequired Err(e) => { let reason = AbortReason::TakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = TakerPaymentRefunded { @@ -1933,7 +1942,7 @@ for TakerPaymentRefundRequired } impl StorableState -for TakerPaymentRefundRequired + for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1959,13 +1968,14 @@ struct MakerPaymentConfirmed -TransitionFrom> -for MakerPaymentConfirmed -{} + TransitionFrom> + for MakerPaymentConfirmed +{ +} #[async_trait] impl State -for MakerPaymentConfirmed + for MakerPaymentConfirmed { type StateMachine = TakerSwapStateMachine; @@ -1997,7 +2007,7 @@ for MakerPaymentConfirmed reason: TakerFundingRefundReason::FailedToSendTakerPayment(format!("{:?}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( @@ -2019,7 +2029,7 @@ for MakerPaymentConfirmed } impl StorableState -for MakerPaymentConfirmed + for MakerPaymentConfirmed { type StateMachine = TakerSwapStateMachine; @@ -2054,12 +2064,13 @@ struct TakerPaymentSpent -TransitionFrom> for TakerPaymentSpent -{} + TransitionFrom> for TakerPaymentSpent +{ +} #[async_trait] impl State -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2079,7 +2090,7 @@ for TakerPaymentSpent Err(e) => { let reason = AbortReason::CouldNotExtractSecret(e); return 
Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let args = SpendMakerPaymentArgs { @@ -2096,7 +2107,7 @@ for TakerPaymentSpent Err(e) => { let reason = AbortReason::FailedToSpendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; info!( "Spent maker payment {} tx {:02x} during swap {}", @@ -2117,7 +2128,7 @@ for TakerPaymentSpent } impl StorableState -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2152,11 +2163,12 @@ struct MakerPaymentSpent -TransitionFrom> for MakerPaymentSpent -{} + TransitionFrom> for MakerPaymentSpent +{ +} impl StorableState -for MakerPaymentSpent + for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2186,7 +2198,7 @@ for MakerPaymentSpent #[async_trait] impl State -for MakerPaymentSpent + for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2236,7 +2248,7 @@ impl Aborted { #[async_trait] impl LastState -for Aborted + for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2249,7 +2261,7 @@ for Aborted } impl StorableState -for Aborted + for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2265,20 +2277,24 @@ impl TransitionFrom> for impl TransitionFrom> for Aborted {} impl TransitionFrom> -for Aborted -{} + for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} struct Completed { maker_coin: PhantomData, @@ -2295,7 +2311,7 @@ impl Completed { } impl StorableState -for Completed + for Completed { type StateMachine = TakerSwapStateMachine; @@ -2304,7 +2320,7 @@ for Completed #[async_trait] impl LastState -for Completed + for Completed { type StateMachine = TakerSwapStateMachine; @@ -2317,8 +2333,9 @@ for Completed } impl -TransitionFrom> for Completed -{} + TransitionFrom> for Completed +{ +} struct TakerFundingRefunded { maker_coin: PhantomData, @@ -2328,7 +2345,7 @@ struct TakerFundingRefunded StorableState -for TakerFundingRefunded + for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2349,7 +2366,7 @@ for TakerFundingRefunded #[async_trait] impl LastState -for TakerFundingRefunded + for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2365,8 +2382,9 @@ for TakerFundingRefunded } impl -TransitionFrom> for TakerFundingRefunded -{} + TransitionFrom> for TakerFundingRefunded +{ +} struct TakerPaymentRefunded { maker_coin: PhantomData, @@ -2376,7 +2394,7 @@ struct TakerPaymentRefunded StorableState -for TakerPaymentRefunded + for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2394,7 +2412,7 @@ for TakerPaymentRefunded #[async_trait] impl LastState -for TakerPaymentRefunded + for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2410,5 +2428,6 @@ for TakerPaymentRefunded } impl -TransitionFrom> for TakerPaymentRefunded -{} + TransitionFrom> for TakerPaymentRefunded +{ +} From b9375e03eda326d70605493831158468b04c4816 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 20 Apr 2024 04:19:44 +0100 Subject: [PATCH 056/186] finish sqlite refactoring and rename sqlite_connection_v2 to sql_connection --- mm2src/coins/lightning/ln_utils.rs | 15 ++- .../sql_tx_history_storage_v2.rs | 12 +-- .../utxo/utxo_block_header_storage/mod.rs | 2 +- .../storage/blockdb/blockdb_sql_storage.rs | 15 ++- mm2src/mm2_core/src/mm_ctx.rs | 97 
++++++++----------- .../src/account/storage/sqlite_storage.rs | 15 ++- mm2src/mm2_main/src/database.rs | 8 +- mm2src/mm2_main/src/database/my_orders.rs | 10 +- mm2src/mm2_main/src/database/my_swaps.rs | 4 +- mm2src/mm2_main/src/database/stats_nodes.rs | 10 +- mm2src/mm2_main/src/lp_native_dex.rs | 4 +- .../src/lp_ordermatch/my_orders_storage.rs | 25 ++--- mm2src/mm2_main/src/lp_swap.rs | 2 +- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 2 +- .../mm2_main/src/lp_swap/my_swaps_storage.rs | 2 +- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 10 +- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 4 +- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 2 +- mm2src/mm2_main/src/ordermatch_tests.rs | 11 +-- mm2src/mm2_test_helpers/src/for_tests.rs | 19 ++-- 20 files changed, 123 insertions(+), 146 deletions(-) diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index a66af3fda4..08085ff3a3 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -69,14 +69,11 @@ pub async fn init_persister( } pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult { - let db = SqliteLightningDB::new( - ticker, - ctx.sqlite_connection - .ok_or(MmError::new(EnableLightningError::DbError( - "sqlite_connection is not initialized".into(), - )))? - .clone(), - ); + let shared = ctx.sqlite_connection_res(None).map_to_mm(|_| { + EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) + })?; + + let db = SqliteLightningDB::new(ticker, shared); if !db.is_db_initialized().await? { db.init_db().await?; @@ -148,7 +145,7 @@ pub async fn init_channel_manager( return MmError::err(EnableLightningError::UnsupportedMode( "Lightning network".into(), "electrum".into(), - )) + )); }, }; let best_header = get_best_header(&rpc_client).await?; diff --git a/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs b/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs index 49993e4c6a..d7870edab6 100644 --- a/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs +++ b/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs @@ -376,12 +376,12 @@ pub struct SqliteTxHistoryStorage(Arc>); impl SqliteTxHistoryStorage { pub fn new(ctx: &MmArc) -> Result> { - let sqlite_connection = ctx - .sqlite_connection - .ok_or(MmError::new(CreateTxHistoryStorageError::Internal( - "sqlite_connection is not initialized".to_owned(), - )))?; - Ok(SqliteTxHistoryStorage(sqlite_connection.clone())) + // TODO db_id + let sqlite_connection = ctx.sqlite_connection_res(None).map_to_mm(|_| { + CreateTxHistoryStorageError::Internal("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) + })?; + + Ok(SqliteTxHistoryStorage(sqlite_connection)) } } diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index ffc3635b35..11b5bfa751 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -34,7 +34,7 @@ impl BlockHeaderStorage { db_id: Option<&str>, ) -> Result { let sqlite_connection = ctx - .sqlite_connection_res_v2(db_id) + .sqlite_connection_res(db_id) .map_err(|_| BlockHeaderStorageError::Internal("sqlite_connection is not initialized".to_owned()))?; Ok(BlockHeaderStorage { diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs index 2de560c3c5..4338527f38 100644 --- 
a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs @@ -69,20 +69,17 @@ impl BlockDbImpl { #[cfg(all(test))] pub(crate) async fn new( - ctx: &MmArc, + _ctx: &MmArc, ticker: String, _path: PathBuf, _db_id: Option<&str>, ) -> ZcoinStorageRes { - let ctx = ctx.clone(); + let conn = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); + let conn_clone = conn.clone(); async_blocking(move || { - let conn = ctx - .sqlite_connection - .clone_or(Arc::new(Mutex::new(Connection::open_in_memory().unwrap()))); - let conn_clone = conn.clone(); - let conn_clone = conn_clone.lock().unwrap(); - run_optimization_pragmas(&conn_clone).map_err(|err| ZcoinStorageError::DbError(err.to_string()))?; - conn_clone + let conn_lock = conn_clone.lock().unwrap(); + run_optimization_pragmas(&conn_lock).map_err(|err| ZcoinStorageError::DbError(err.to_string()))?; + conn_lock .execute( "CREATE TABLE IF NOT EXISTS compactblocks ( height INTEGER PRIMARY KEY, diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 91471d91ae..361005631d 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -18,7 +18,7 @@ use std::collections::HashSet; use std::fmt; use std::future::Future; use std::ops::Deref; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; cfg_wasm32! { use mm2_rpc::wasm_rpc::WasmRpcSender; @@ -34,7 +34,7 @@ cfg_native! { use mm2_metrics::MmMetricsError; use std::net::{IpAddr, SocketAddr, AddrParseError}; use std::path::{Path, PathBuf}; - use std::sync::{MutexGuard, Mutex}; + use std::sync::MutexGuard; } /// Default interval to export and record metrics to log. @@ -124,9 +124,7 @@ pub struct MmCtx { pub wasm_rpc: Constructible, /// Deprecated, please use `async_sqlite_connection` for new implementations. #[cfg(not(target_arch = "wasm32"))] - pub sqlite_connection: Constructible>>, - #[cfg(not(target_arch = "wasm32"))] - pub sqlite_connection_v2: Constructible>>>, + pub sqlite_connection: Constructible>>>, /// Deprecated, please create `shared_async_sqlite_conn` for new implementations and call db `KOMODEFI-shared.db`. #[cfg(not(target_arch = "wasm32"))] pub shared_sqlite_conn: Constructible>>, @@ -185,8 +183,6 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] sqlite_connection: Constructible::default(), #[cfg(not(target_arch = "wasm32"))] - sqlite_connection_v2: Constructible::default(), - #[cfg(not(target_arch = "wasm32"))] shared_sqlite_conn: Constructible::default(), #[cfg(not(target_arch = "wasm32"))] async_sqlite_connection: Constructible::default(), @@ -231,7 +227,7 @@ impl MmCtx { rpcport ) })? - }, + } None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -246,7 +242,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -266,7 +262,7 @@ impl MmCtx { return ERR!("IP address {} must be specified", ip); } Ok(()) - }, + } Ok(ServerName::DnsName(_)) => Ok(()), // NOTE: We need to have this wild card since `ServerName` is a non_exhaustive enum. 
Ok(_) => ERR!("Only IpAddress and DnsName are allowed in `alt_names`"), @@ -364,20 +360,11 @@ impl MmCtx { pub fn init_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { let sqlite_file_path = self.dbdir(db_id).join("MM2.db"); log_sqlite_file_open_attempt(&sqlite_file_path); - let connection = try_s!(Connection::open(sqlite_file_path)); - try_s!(self.sqlite_connection.pin(Arc::new(Mutex::new(connection)))); - Ok(()) - } - - #[cfg(not(target_arch = "wasm32"))] - pub fn init_sqlite_connection_v2(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.dbdir(db_id).join("MM2.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); let connection = try_s!(Connection::open(sqlite_file_path)); let mut store = HashMap::new(); store.insert(self.rmd160_hex(), Arc::new(Mutex::new(connection))); - try_s!(self.sqlite_connection_v2.pin(Arc::new(Mutex::new(store)))); + try_s!(self.sqlite_connection.pin(Arc::new(Mutex::new(store)))); Ok(()) } @@ -394,25 +381,21 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub async fn init_async_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.dbdir(db_id).join(ASYNC_SQLITE_DB_ID); + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); + let sqlite_file_path = self.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); let mut store = HashMap::new(); - store.insert(self.rmd160_hex(), Arc::new(AsyncMutex::new(async_conn))); + store.insert(db_id, Arc::new(AsyncMutex::new(async_conn))); try_s!(self.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(store)))); Ok(()) } #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_conn_opt(&self) -> Option> { - self.sqlite_connection.as_option().map(|conn| conn.lock().unwrap()) - } - - #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_conn_opt_v2(&self, db_id: Option<&str>) -> Option { - if let Some(connections) = self.sqlite_connection_v2.as_option() { + pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option { + if let Some(connections) = self.sqlite_connection.as_option() { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); let connections = connections.lock().unwrap(); return if let Some(connection) = connections.get(&db_id) { @@ -425,7 +408,7 @@ impl MmCtx { Connection::open(sqlite_file_path).expect("failed to open db"), )); let mut store = HashMap::new(); - store.insert(self.rmd160_hex(), connection.clone()); + store.insert(db_id, connection.clone()); drop(connections); Some(connection) }; @@ -435,18 +418,10 @@ impl MmCtx { } #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_connection(&self) -> MutexGuard { - self.sqlite_connection - .or(&|| panic!("sqlite_connection is not initialized")) - .lock() - .unwrap() - } - - #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_connection_v2(&self, db_id: Option<&str>) -> SyncSqliteConnectionArc { + pub fn sqlite_connection(&self, db_id: Option<&str>) -> SyncSqliteConnectionArc { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); let connections = self - .sqlite_connection_v2 + .sqlite_connection .or(&|| panic!("sqlite_connection is not initialized")) .lock() .unwrap(); @@ -460,18 +435,17 @@ impl MmCtx { Connection::open(sqlite_file_path).expect("failed to open db"), )); let mut store = HashMap::new(); - store.insert(self.rmd160_hex(), connection.clone()); + store.insert(db_id, 
connection.clone()); - drop(connections); connection }; } #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_connection_res_v2(&self, db_id: Option<&str>) -> Result { + pub fn sqlite_connection_res(&self, db_id: Option<&str>) -> Result { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); let connections = self - .sqlite_connection_v2 + .sqlite_connection .ok_or("sqlite_connection is not initialized".to_string())? .lock() .unwrap(); @@ -483,13 +457,24 @@ impl MmCtx { let connection = Arc::new(Mutex::new(try_s!(Connection::open(sqlite_file_path)))); let mut store = HashMap::new(); - store.insert(self.rmd160_hex(), connection.clone()); + store.insert(db_id, connection.clone()); drop(connections); Ok(connection) } } + #[cfg(not(target_arch = "wasm32"))] + pub fn init_sqlite_connection_for_test(&self, db_id: Option<&str>) -> Result { + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); + let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); + let mut store = HashMap::new(); + store.insert(db_id, connection.clone()); + try_s!(self.sqlite_connection.pin(Arc::new(Mutex::new(store)))); + + Ok(connection) + } + #[cfg(not(target_arch = "wasm32"))] pub fn shared_sqlite_conn(&self) -> MutexGuard { self.shared_sqlite_conn @@ -627,7 +612,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. Stop the loop"); break; - }, + } } } }; @@ -661,7 +646,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - }, + } } } } @@ -758,8 +743,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -767,8 +752,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -782,9 +767,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> -where - C: FnOnce() -> Result, - T: 'static + Send + Sync, + where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -877,9 +862,9 @@ pub fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - }, + } Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - }, + } } } diff --git a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs index 916854de63..bf604ca5a0 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs @@ -14,7 +14,7 @@ use mm2_err_handle::prelude::*; use mm2_number::BigDecimal; use std::collections::{BTreeMap, BTreeSet}; use std::str::FromStr; -use std::sync::{Arc, MutexGuard}; +use std::sync::MutexGuard; const DEVICE_PUBKEY_MAX_LENGTH: usize = 20; const BALANCE_MAX_LENGTH: usize = 255; @@ -116,13 +116,12 @@ pub(crate) struct SqliteAccountStorage { impl SqliteAccountStorage { pub(crate) fn new(ctx: &MmArc) -> AccountStorageResult { - let shared = ctx - .sqlite_connection - .as_option() - .or_mm_err(|| AccountStorageError::Internal("'MmCtx::sqlite_connection' is not 
initialized".to_owned()))?; - Ok(SqliteAccountStorage { - conn: Arc::clone(shared), - }) + // TODO db_id + let conn = ctx.sqlite_connection_res(None).map_to_mm(|_| { + AccountStorageError::Internal("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) + })?; + + Ok(SqliteAccountStorage { conn }) } fn lock_conn_mutex(&self) -> AccountStorageResult> { diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index b02e42fd1e..267fd1d35f 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -19,7 +19,7 @@ use stats_swaps::create_and_fill_stats_swaps_from_json_statements; const SELECT_MIGRATION: &str = "SELECT * FROM migration ORDER BY current_migration DESC LIMIT 1;"; fn get_current_migration(ctx: &MmArc, db_id: Option<&str>) -> SqlResult { - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.query_row(SELECT_MIGRATION, [], |row| row.get(0)) } @@ -52,7 +52,7 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes } fn init_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> { - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); run_optimization_pragmas(&conn)?; let init_batch = concat!( @@ -66,7 +66,7 @@ fn init_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> { } fn clean_db(ctx: &MmArc, db_id: Option<&str>) { - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); if let Err(e) = conn.execute_batch( "DROP TABLE migration; @@ -152,7 +152,7 @@ pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64) -> Sql while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, // so we can't create a transaction outside of this loop. 
- let conn = ctx.sqlite_connection_v2(Some(&db_id)); + let conn = ctx.sqlite_connection(Some(&db_id)); let conn = conn.lock().unwrap(); let transaction = conn.unchecked_transaction()?; for (statement, params) in statements_with_params { diff --git a/mm2src/mm2_main/src/database/my_orders.rs b/mm2src/mm2_main/src/database/my_orders.rs index fe0fb41e8a..36ca83f40e 100644 --- a/mm2src/mm2_main/src/database/my_orders.rs +++ b/mm2src/mm2_main/src/database/my_orders.rs @@ -56,7 +56,7 @@ pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Op 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) @@ -82,7 +82,7 @@ pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder, db_id: Op 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) @@ -97,7 +97,7 @@ pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Op order.updated_at.unwrap_or(0).to_string(), "Updated".to_string(), ]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(UPDATE_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) @@ -111,7 +111,7 @@ pub fn update_was_taker(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> SqlResu now_ms().to_string(), 1.to_string(), ]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(UPDATE_WAS_TAKER, params_from_iter(params.iter())) .map(|_| ()) @@ -120,7 +120,7 @@ pub fn update_was_taker(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> SqlResu pub fn update_order_status(ctx: &MmArc, uuid: Uuid, status: String, db_id: Option<&str>) -> SqlResult<()> { debug!("Updating order {} in the SQLite database", uuid); let params = vec![uuid.to_string(), now_ms().to_string(), status]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(UPDATE_ORDER_STATUS, params_from_iter(params.iter())) .map(|_| ()) diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 2f8fdaac77..6a27bb0cf3 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -73,7 +73,7 @@ pub fn insert_new_swap( db_id: Option<&str>, ) -> SqlResult<()> { debug!("Inserting new swap {} to the SQLite database", uuid); - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); let params = [my_coin, other_coin, uuid, started_at, &swap_type.to_string()]; conn.execute(INSERT_MY_SWAP, params).map(|_| ()) @@ -124,7 +124,7 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( );"#; pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)], db_id: Option<&str>) -> SqlResult<()> { - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_SWAP_V2, params).map(|_| ()) } diff --git a/mm2src/mm2_main/src/database/stats_nodes.rs b/mm2src/mm2_main/src/database/stats_nodes.rs index 0319673d4b..0347e5468f 100644 --- 
a/mm2src/mm2_main/src/database/stats_nodes.rs +++ b/mm2src/mm2_main/src/database/stats_nodes.rs @@ -37,7 +37,7 @@ pub fn insert_node_info(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str>) node_info.address.clone(), node_info.peer_id.clone(), ]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(INSERT_NODE, params_from_iter(params.iter())).map(|_| ()) } @@ -45,13 +45,13 @@ pub fn insert_node_info(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str>) pub fn delete_node_info(ctx: &MmArc, name: String, db_id: Option<&str>) -> SqlResult<()> { debug!("Deleting info about node {} from the SQLite database", name); let params = vec![name]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(DELETE_NODE, params_from_iter(params.iter())).map(|_| ()) } pub fn select_peers_addresses(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); let mut stmt = conn.prepare(SELECT_PEERS_ADDRESSES)?; let peers_addresses = stmt @@ -62,7 +62,7 @@ pub fn select_peers_addresses(ctx: &MmArc, db_id: Option<&str>) -> SqlResult) -> SqlResult, SqlError> { - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); let mut stmt = conn.prepare(SELECT_PEERS_NAMES)?; let peers_names = stmt @@ -83,7 +83,7 @@ pub fn insert_node_version_stat(ctx: &MmArc, node_version_stat: NodeVersionStat, node_version_stat.timestamp.to_string(), node_version_stat.error.unwrap_or_default(), ]; - let conn = ctx.sqlite_connection_v2(db_id); + let conn = ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); conn.execute(INSERT_STAT, params_from_iter(params.iter())).map(|_| ()) } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index ac80cc831c..00c5b4cd10 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -406,7 +406,7 @@ fn fix_shared_dbdir(ctx: &MmCtx) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] fn migrate_db(ctx: &MmArc, db_id: Option<&str>) -> MmInitResult<()> { let migration_num_path = ctx.dbdir(db_id).join(".migration"); - let mut current_migration = match std::fs::read(&migration_num_path) { + let mut current_migration = match fs::read(&migration_num_path) { Ok(bytes) => { let mut num_bytes = [0; 8]; if bytes.len() == 8 { @@ -470,8 +470,6 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { fix_directories(&ctx, Some(db_id))?; ctx.init_sqlite_connection(Some(db_id)) .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_sqlite_connection_v2(Some(db_id)) - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.init_shared_sqlite_conn() .map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.init_async_sqlite_connection(Some(db_id)) diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 8d159472ed..43ac70eac7 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -7,7 +7,8 @@ use derive_more::Display; use futures::{FutureExt, TryFutureExt}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; -#[cfg(test)] use mocktopus::macros::*; +#[cfg(test)] +use mocktopus::macros::*; use uuid::Uuid; pub type 
MyOrdersResult = Result>; @@ -96,7 +97,7 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa .await .error_log_with_msg("!save_order_in_history"); } - }, + } } if save_in_history { @@ -227,7 +228,7 @@ mod native_impl { FsJsonError::Serializing(serializing) => MyOrdersError::ErrorSerializing(serializing.to_string()), FsJsonError::Deserializing(deserializing) => { MyOrdersError::ErrorDeserializing(deserializing.to_string()) - }, + } } } } @@ -326,14 +327,14 @@ mod native_impl { filter: &MyOrdersFilter, paging_options: Option<&PagingOptions>, ) -> MyOrdersResult { - let conn = self.ctx.sqlite_connection_v2(self.db_id.as_deref()); + let conn = self.ctx.sqlite_connection(self.db_id.as_deref()); let conn = conn.lock().unwrap(); select_orders_by_filter(&conn, filter, paging_options) .map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) } async fn select_order_status(&self, uuid: Uuid) -> MyOrdersResult { - let conn = self.ctx.sqlite_connection_v2(self.db_id.as_deref()); + let conn = self.ctx.sqlite_connection(self.db_id.as_deref()); let conn = conn.lock().unwrap(); select_status_by_uuid(&conn, &uuid).map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) } @@ -411,7 +412,7 @@ mod wasm_impl { } impl MyOrdersStorage { - pub fn new(ctx: MmArc) -> MyOrdersStorage { + pub fn new(ctx: MmArc, _db_id: Option<&str>) -> MyOrdersStorage { MyOrdersStorage { ctx: OrdermatchContext::from_ctx(&ctx).expect("!OrdermatchContext::from_ctx"), } @@ -804,9 +805,9 @@ mod tests { maker1.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .unwrap(); + .compat() + .await + .unwrap(); let actual_active_maker_orders = storage .load_active_taker_orders() @@ -982,9 +983,9 @@ mod tests { maker_order_to_filtering_history_item(&maker2, "Updated".to_owned(), false).unwrap(), taker_order_to_filtering_history_item(&taker1, "MyCustomStatus".to_owned()).unwrap(), ] - .into_iter() - .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) - .collect(); + .into_iter() + .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) + .collect(); assert_eq!(actual_items, expected_items); diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 8d7f5c76ed..a46b114c8b 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1031,7 +1031,7 @@ pub async fn insert_new_swap_to_db( #[cfg(not(target_arch = "wasm32"))] fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap, db_id: Option<&str>) { - if let Some(conn) = ctx.sqlite_conn_opt_v2(db_id) { + if let Some(conn) = ctx.sqlite_conn_opt(db_id) { let conn = conn.lock().unwrap(); crate::mm2::database::stats_swaps::add_swap_to_index(&conn, swap) } diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index c423acb1f7..8ba94ec667 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -218,7 +218,7 @@ impl StateMachineStorage for MakerSwapStorage { let db_id = self.db_id.clone(); async_blocking(move || { - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); Ok(conn.query_row( SELECT_MY_SWAP_V2_BY_UUID, diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 4b07d97710..dfe74434c8 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -104,7 +104,7 @@ mod native_impl { paging_options: 
Option<&PagingOptions>, db_id: &str, ) -> MySwapsResult { - let conn = self.ctx.sqlite_connection_v2(Some(db_id)); + let conn = self.ctx.sqlite_connection(Some(db_id)); let conn = conn.lock().unwrap(); Ok(select_uuids_by_my_swaps_filter(&conn, filter, paging_options, db_id)?) } diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index ddfaeaef29..446a234e16 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -107,7 +107,7 @@ pub(super) async fn has_db_record_for( let db_id = db_id.map(|e| e.to_string()); Ok(async_blocking(move || { - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); does_swap_exist(&conn, &id_str, db_id.as_deref()) }) @@ -142,14 +142,14 @@ where let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); let events_json = get_swap_events(&conn, &id_str, db_id.as_deref())?; let mut events: Vec = serde_json::from_str(&events_json)?; events.push(event); drop_mutability!(events); let serialized_events = serde_json::to_string(&events)?; - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); update_swap_events(&conn, &id_str, &serialized_events, db_id.as_deref())?; Ok(()) @@ -213,7 +213,7 @@ pub(super) async fn get_unfinished_swaps_uuids( ) -> MmResult, SwapStateMachineError> { let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); select_unfinished_swaps_uuids(&conn, swap_type, db_id.as_deref()) .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) @@ -247,7 +247,7 @@ pub(super) async fn mark_swap_as_finished( ) -> MmResult<(), SwapStateMachineError> { let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); Ok(set_swap_is_finished(&conn, &id.to_string(), db_id.as_deref())?) 
}) diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index eb007afbad..aa626c13df 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -42,7 +42,7 @@ pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid, db_id: Option<&str>) async_blocking(move || { const SELECT_SWAP_TYPE_BY_UUID: &str = "SELECT swap_type FROM my_swaps WHERE uuid = :uuid;"; - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); let maybe_swap_type = query_single_row(&conn, SELECT_SWAP_TYPE_BY_UUID, &[(":uuid", uuid.as_str())], |row| { row.get(0) @@ -179,7 +179,7 @@ async fn get_swap_data_for_rpc_impl( let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); let swap_data = query_single_row( &conn, diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index c0b4d957cc..4176c8992b 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -250,7 +250,7 @@ impl StateMachineStorage for TakerSwapStorage { let db_id = self.db_id.clone(); async_blocking(move || { - let conn = ctx.sqlite_connection_v2(db_id.as_deref()); + let conn = ctx.sqlite_connection(db_id.as_deref()); let conn = conn.lock().unwrap(); Ok(conn.query_row( SELECT_MY_SWAP_V2_BY_UUID, diff --git a/mm2src/mm2_main/src/ordermatch_tests.rs b/mm2src/mm2_main/src/ordermatch_tests.rs index b0657b0d00..94d9732260 100644 --- a/mm2src/mm2_main/src/ordermatch_tests.rs +++ b/mm2src/mm2_main/src/ordermatch_tests.rs @@ -3,7 +3,6 @@ use crate::mm2::lp_ordermatch::new_protocol::{MakerOrderUpdated, PubkeyKeepAlive use coins::{MmCoin, TestCoin}; use common::{block_on, executor::spawn}; use crypto::privkey::key_pair_from_seed; -use db_common::sqlite::rusqlite::Connection; use futures::{channel::mpsc, StreamExt}; use mm2_core::mm_ctx::{MmArc, MmCtx}; use mm2_libp2p::AdexBehaviourCmd; @@ -15,7 +14,6 @@ use rand::{seq::SliceRandom, thread_rng, Rng}; use secp256k1::PublicKey; use std::collections::HashSet; use std::iter::{self, FromIterator}; -use std::sync::Mutex; #[test] fn test_match_maker_order_and_taker_request() { @@ -1044,8 +1042,7 @@ fn test_cancel_by_single_coin() { let ctx = mm_ctx_with_iguana(None); let rx = prepare_for_cancel_by(&ctx); - let connection = Connection::open_in_memory().unwrap(); - let _ = ctx.sqlite_connection.pin(Arc::new(Mutex::new(connection))); + let _ = ctx.init_sqlite_connection_for_test(None); delete_my_maker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); delete_my_taker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); @@ -1063,8 +1060,7 @@ fn test_cancel_by_pair() { let ctx = mm_ctx_with_iguana(None); let rx = prepare_for_cancel_by(&ctx); - let connection = Connection::open_in_memory().unwrap(); - let _ = ctx.sqlite_connection.pin(Arc::new(Mutex::new(connection))); + let _ = ctx.init_sqlite_connection_for_test(None); delete_my_maker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); delete_my_taker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); @@ -1086,8 +1082,7 @@ fn test_cancel_by_all() { let ctx = mm_ctx_with_iguana(None); let rx = 
prepare_for_cancel_by(&ctx); - let connection = Connection::open_in_memory().unwrap(); - let _ = ctx.sqlite_connection.pin(Arc::new(Mutex::new(connection))); + let _ = ctx.init_sqlite_connection_for_test(None); delete_my_maker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); delete_my_taker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 35283f9800..93449cef42 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -1014,8 +1014,10 @@ pub fn mm_ctx_with_custom_db_with_conf(conf: Option) -> MmArc { } let ctx = ctx_builder.into_mm_arc(); - let connection = Connection::open_in_memory().unwrap(); - let _ = ctx.sqlite_connection.pin(Arc::new(Mutex::new(connection))); + let mut connections = HashMap::new(); + let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); + connections.insert(ctx.rmd160_hex(), connection); + let _ = ctx.sqlite_connection.pin(Arc::new(Mutex::new(connections))); let connection = Connection::open_in_memory().unwrap(); let _ = ctx.shared_sqlite_conn.pin(Arc::new(Mutex::new(connection))); @@ -1044,6 +1046,7 @@ pub struct RaiiKill { pub handle: Child, running: bool, } + impl RaiiKill { pub fn from_handle(handle: Child) -> RaiiKill { RaiiKill { handle, running: true } } pub fn running(&mut self) -> bool { @@ -1059,6 +1062,7 @@ impl RaiiKill { } } } + impl Drop for RaiiKill { fn drop(&mut self) { // The cached `running` check might provide some protection against killing a wrong process under the same PID, @@ -1079,6 +1083,7 @@ pub struct RaiiDump { #[cfg(not(target_arch = "wasm32"))] pub log_path: PathBuf, } + #[cfg(not(target_arch = "wasm32"))] impl Drop for RaiiDump { fn drop(&mut self) { @@ -2122,7 +2127,7 @@ pub async fn init_lightning(mm: &MarketMakerIt, coin: &str) -> Json { pub async fn init_lightning_status(mm: &MarketMakerIt, task_id: u64) -> Json { let request = mm - .rpc(&json! ({ + .rpc(&json!({ "userpass": mm.userpass, "method": "task::enable_lightning::status", "mmrpc": "2.0", @@ -2796,7 +2801,7 @@ pub async fn max_maker_vol(mm: &MarketMakerIt, coin: &str) -> RpcResponse { } pub async fn disable_coin(mm: &MarketMakerIt, coin: &str, force_disable: bool) -> DisableResult { - let req = json! ({ + let req = json!({ "userpass": mm.userpass, "method": "disable_coin", "coin": coin, @@ -2812,7 +2817,7 @@ pub async fn disable_coin(mm: &MarketMakerIt, coin: &str, force_disable: bool) - /// Returns a `DisableCoinError` error. pub async fn disable_coin_err(mm: &MarketMakerIt, coin: &str, force_disable: bool) -> DisableCoinError { let disable = mm - .rpc(&json! ({ + .rpc(&json!({ "userpass": mm.userpass, "method": "disable_coin", "coin": coin, @@ -2826,7 +2831,7 @@ pub async fn disable_coin_err(mm: &MarketMakerIt, coin: &str, force_disable: boo pub async fn assert_coin_not_found_on_balance(mm: &MarketMakerIt, coin: &str) { let balance = mm - .rpc(&json! ({ + .rpc(&json!({ "userpass": mm.userpass, "method": "my_balance", "coin": coin @@ -3159,7 +3164,7 @@ pub async fn test_qrc20_history_impl(local_start: Option) { ]); let mut mm = MarketMakerIt::start_async( - json! 
({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), From 4c3a61c4414f9ac8c7c8bf0eb1fc5422994de01d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 20 Apr 2024 04:26:14 +0100 Subject: [PATCH 057/186] cargo fmt --- mm2src/mm2_core/src/mm_ctx.rs | 28 +++++++++---------- .../src/lp_ordermatch/my_orders_storage.rs | 19 ++++++------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 361005631d..f1174076ec 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -227,7 +227,7 @@ impl MmCtx { rpcport ) })? - } + }, None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -242,7 +242,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -262,7 +262,7 @@ impl MmCtx { return ERR!("IP address {} must be specified", ip); } Ok(()) - } + }, Ok(ServerName::DnsName(_)) => Ok(()), // NOTE: We need to have this wild card since `ServerName` is a non_exhaustive enum. Ok(_) => ERR!("Only IpAddress and DnsName are allowed in `alt_names`"), @@ -612,7 +612,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. Stop the loop"); break; - } + }, } } }; @@ -646,7 +646,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - } + }, } } } @@ -743,8 +743,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -752,8 +752,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -767,9 +767,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> - where - C: FnOnce() -> Result, - T: 'static + Send + Sync, +where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -862,9 +862,9 @@ pub fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - } + }, Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - } + }, } } diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 43ac70eac7..d065a517b5 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -7,8 +7,7 @@ use derive_more::Display; use futures::{FutureExt, TryFutureExt}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; -#[cfg(test)] -use mocktopus::macros::*; +#[cfg(test)] use mocktopus::macros::*; use uuid::Uuid; pub type MyOrdersResult = Result>; @@ -97,7 +96,7 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa .await .error_log_with_msg("!save_order_in_history"); } - } + }, } if save_in_history { @@ -228,7 +227,7 @@ mod native_impl { FsJsonError::Serializing(serializing) => MyOrdersError::ErrorSerializing(serializing.to_string()), FsJsonError::Deserializing(deserializing) => { 
MyOrdersError::ErrorDeserializing(deserializing.to_string()) - } + }, } } } @@ -805,9 +804,9 @@ mod tests { maker1.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .unwrap(); + .compat() + .await + .unwrap(); let actual_active_maker_orders = storage .load_active_taker_orders() @@ -983,9 +982,9 @@ mod tests { maker_order_to_filtering_history_item(&maker2, "Updated".to_owned(), false).unwrap(), taker_order_to_filtering_history_item(&taker1, "MyCustomStatus".to_owned()).unwrap(), ] - .into_iter() - .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) - .collect(); + .into_iter() + .sorted_by(|x, y| x.uuid.cmp(&y.uuid)) + .collect(); assert_eq!(actual_items, expected_items); From 12537c02cdcbceb45c978c7f795a0b730d6a6af0 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 20 Apr 2024 08:44:44 +0100 Subject: [PATCH 058/186] initialize only default pubkey in lp_init --- mm2src/mm2_main/src/lp_native_dex.rs | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 00c5b4cd10..ca1160dd95 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -463,21 +463,16 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - let db_ids = find_unique_account_ids_any(&ctx) + fix_directories(&ctx, None)?; + ctx.init_sqlite_connection(None) + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + ctx.init_shared_sqlite_conn() + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + ctx.init_async_sqlite_connection(None) .await - .map_to_mm(MmInitError::Internal)?; - for db_id in db_ids.iter() { - fix_directories(&ctx, Some(db_id))?; - ctx.init_sqlite_connection(Some(db_id)) - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_shared_sqlite_conn() - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_async_sqlite_connection(Some(db_id)) - .await - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - init_and_migrate_sql_db(&ctx, Some(db_id)).await?; - migrate_db(&ctx, Some(db_id))?; - } + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + init_and_migrate_sql_db(&ctx, None).await?; + migrate_db(&ctx, None)?; } init_message_service(&ctx).await?; From 77b43f5e65d84b78c87d8bb29e31eb295866554e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 20 Apr 2024 08:49:11 +0100 Subject: [PATCH 059/186] cargo clippy --- mm2src/mm2_main/src/lp_native_dex.rs | 52 +++++++++++++++------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index ca1160dd95..00c352a31c 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -19,7 +19,7 @@ // use bitcrypto::sha256; -use coins::{find_unique_account_ids_any, register_balance_update_handler}; +use coins::register_balance_update_handler; use common::executor::{SpawnFuture, Timer}; use common::log::{info, warn}; use crypto::{from_hw_error, CryptoCtx, HwError, HwProcessingError, HwRpcError, WithHwRpcError}; @@ -63,8 +63,10 @@ cfg_native! { use rustls_pemfile as pemfile; } -#[path = "lp_init/init_context.rs"] mod init_context; -#[path = "lp_init/init_hw.rs"] pub mod init_hw; +#[path = "lp_init/init_context.rs"] +mod init_context; +#[path = "lp_init/init_hw.rs"] +pub mod init_hw; cfg_wasm32! 
{ use mm2_net::wasm_event_stream::handle_worker_stream; @@ -122,9 +124,9 @@ pub type MmInitResult = Result>; #[derive(Clone, Debug, Display, Serialize)] pub enum P2PInitError { #[display( - fmt = "Invalid WSS key/cert at {:?}. The file must contain {}'", - path, - expected_format + fmt = "Invalid WSS key/cert at {:?}. The file must contain {}'", + path, + expected_format )] InvalidWssCert { path: PathBuf, expected_format: String }, #[display(fmt = "Error deserializing '{}' config field: {}", field, error)] @@ -222,7 +224,7 @@ impl From for MmInitError { match e { P2PInitError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - }, + } P2PInitError::FieldNotFoundInConfig { field } => MmInitError::FieldNotFoundInConfig { field }, P2PInitError::Internal(e) => MmInitError::Internal(e), other => MmInitError::P2PError(other), @@ -240,7 +242,7 @@ impl From for MmInitError { match e { OrdermatchInitError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - }, + } OrdermatchInitError::Internal(internal) => MmInitError::Internal(internal), } } @@ -251,7 +253,7 @@ impl From for MmInitError { match e { WalletInitError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - }, + } other => MmInitError::WalletInitError(other.to_string()), } } @@ -262,7 +264,7 @@ impl From for MmInitError { match e { InitMessageServiceError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - }, + } } } } @@ -415,7 +417,7 @@ fn migrate_db(ctx: &MmArc, db_id: Option<&str>) -> MmInitResult<()> { } else { 0 } - }, + } Err(_) => 0, }; @@ -630,7 +632,7 @@ pub async fn init_p2p(ctx: MmArc) -> P2PResult<()> { connected_peers_count as f64 ); }) - .await; + .await; let (cmd_tx, event_rx, peer_id) = spawn_result?; ctx.peer_id.pin(peer_id.to_string()).map_to_mm(P2PInitError::Internal)?; let p2p_context = P2PContext::new(cmd_tx); @@ -713,8 +715,8 @@ fn light_node_type(ctx: &MmArc) -> P2PResult { /// Returns non-empty vector of keys/certs or an error. #[cfg(not(target_arch = "wasm32"))] fn extract_cert_from_file(path: PathBuf, parser: P, expected_format: String) -> P2PResult> -where - P: Fn(&mut dyn io::BufRead) -> Result, io::Error>, + where + P: Fn(&mut dyn io::BufRead) -> Result, io::Error>, { let certfile = fs::File::open(path.as_path()).map_to_mm(|e| P2PInitError::ErrorReadingCertFile { path: path.clone(), @@ -751,14 +753,14 @@ fn wss_certs(ctx: &MmArc) -> P2PResult> { pemfile::pkcs8_private_keys, "Private key, DER-encoded ASN.1 in either PKCS#8 or PKCS#1 format".to_owned(), ) - // or try to extract all PKCS1 private keys - .or_else(|_| { - extract_cert_from_file( - certs.server_priv_key.clone(), - pemfile::rsa_private_keys, - "Private key, DER-encoded ASN.1 in either PKCS#8 or PKCS#1 format".to_owned(), - ) - })?; + // or try to extract all PKCS1 private keys + .or_else(|_| { + extract_cert_from_file( + certs.server_priv_key.clone(), + pemfile::rsa_private_keys, + "Private key, DER-encoded ASN.1 in either PKCS#8 or PKCS#1 format".to_owned(), + ) + })?; // `extract_cert_from_file` returns either non-empty vector or an error. let server_priv_key = rustls::PrivateKey(server_priv_keys.remove(0)); @@ -767,9 +769,9 @@ fn wss_certs(ctx: &MmArc) -> P2PResult> { pemfile::certs, "Certificate, DER-encoded X.509 format".to_owned(), )? 
- .into_iter() - .map(rustls::Certificate) - .collect(); + .into_iter() + .map(rustls::Certificate) + .collect(); Ok(Some(WssCerts { server_priv_key, certs })) } From fbe1fe21a9db23624261b2c3625b47c0786518ce Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sun, 21 Apr 2024 05:05:36 +0100 Subject: [PATCH 060/186] stats_nodes multi key db --- mm2src/mm2_main/src/lp_native_dex.rs | 52 +++++------ mm2src/mm2_main/src/lp_ordermatch.rs | 5 +- mm2src/mm2_main/src/lp_stats.rs | 133 +++++++++++++++++---------- 3 files changed, 111 insertions(+), 79 deletions(-) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 00c352a31c..ccf7408bfd 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -63,10 +63,8 @@ cfg_native! { use rustls_pemfile as pemfile; } -#[path = "lp_init/init_context.rs"] -mod init_context; -#[path = "lp_init/init_hw.rs"] -pub mod init_hw; +#[path = "lp_init/init_context.rs"] mod init_context; +#[path = "lp_init/init_hw.rs"] pub mod init_hw; cfg_wasm32! { use mm2_net::wasm_event_stream::handle_worker_stream; @@ -124,9 +122,9 @@ pub type MmInitResult = Result>; #[derive(Clone, Debug, Display, Serialize)] pub enum P2PInitError { #[display( - fmt = "Invalid WSS key/cert at {:?}. The file must contain {}'", - path, - expected_format + fmt = "Invalid WSS key/cert at {:?}. The file must contain {}'", + path, + expected_format )] InvalidWssCert { path: PathBuf, expected_format: String }, #[display(fmt = "Error deserializing '{}' config field: {}", field, error)] @@ -224,7 +222,7 @@ impl From for MmInitError { match e { P2PInitError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - } + }, P2PInitError::FieldNotFoundInConfig { field } => MmInitError::FieldNotFoundInConfig { field }, P2PInitError::Internal(e) => MmInitError::Internal(e), other => MmInitError::P2PError(other), @@ -242,7 +240,7 @@ impl From for MmInitError { match e { OrdermatchInitError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - } + }, OrdermatchInitError::Internal(internal) => MmInitError::Internal(internal), } } @@ -253,7 +251,7 @@ impl From for MmInitError { match e { WalletInitError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - } + }, other => MmInitError::WalletInitError(other.to_string()), } } @@ -264,7 +262,7 @@ impl From for MmInitError { match e { InitMessageServiceError::ErrorDeserializingConfig { field, error } => { MmInitError::ErrorDeserializingConfig { field, error } - } + }, } } } @@ -417,7 +415,7 @@ fn migrate_db(ctx: &MmArc, db_id: Option<&str>) -> MmInitResult<()> { } else { 0 } - } + }, Err(_) => 0, }; @@ -456,7 +454,7 @@ fn init_wasm_event_streaming(ctx: &MmArc) { } pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { - init_ordermatch_context(&ctx)?; + init_ordermatch_context(&ctx, None)?; init_p2p(ctx.clone()).await?; if !CryptoCtx::is_init(&ctx)? { @@ -632,7 +630,7 @@ pub async fn init_p2p(ctx: MmArc) -> P2PResult<()> { connected_peers_count as f64 ); }) - .await; + .await; let (cmd_tx, event_rx, peer_id) = spawn_result?; ctx.peer_id.pin(peer_id.to_string()).map_to_mm(P2PInitError::Internal)?; let p2p_context = P2PContext::new(cmd_tx); @@ -715,8 +713,8 @@ fn light_node_type(ctx: &MmArc) -> P2PResult { /// Returns non-empty vector of keys/certs or an error. 
#[cfg(not(target_arch = "wasm32"))] fn extract_cert_from_file(path: PathBuf, parser: P, expected_format: String) -> P2PResult> - where - P: Fn(&mut dyn io::BufRead) -> Result, io::Error>, +where + P: Fn(&mut dyn io::BufRead) -> Result, io::Error>, { let certfile = fs::File::open(path.as_path()).map_to_mm(|e| P2PInitError::ErrorReadingCertFile { path: path.clone(), @@ -753,14 +751,14 @@ fn wss_certs(ctx: &MmArc) -> P2PResult> { pemfile::pkcs8_private_keys, "Private key, DER-encoded ASN.1 in either PKCS#8 or PKCS#1 format".to_owned(), ) - // or try to extract all PKCS1 private keys - .or_else(|_| { - extract_cert_from_file( - certs.server_priv_key.clone(), - pemfile::rsa_private_keys, - "Private key, DER-encoded ASN.1 in either PKCS#8 or PKCS#1 format".to_owned(), - ) - })?; + // or try to extract all PKCS1 private keys + .or_else(|_| { + extract_cert_from_file( + certs.server_priv_key.clone(), + pemfile::rsa_private_keys, + "Private key, DER-encoded ASN.1 in either PKCS#8 or PKCS#1 format".to_owned(), + ) + })?; // `extract_cert_from_file` returns either non-empty vector or an error. let server_priv_key = rustls::PrivateKey(server_priv_keys.remove(0)); @@ -769,9 +767,9 @@ fn wss_certs(ctx: &MmArc) -> P2PResult> { pemfile::certs, "Certificate, DER-encoded X.509 format".to_owned(), )? - .into_iter() - .map(rustls::Certificate) - .collect(); + .into_iter() + .map(rustls::Certificate) + .collect(); Ok(Some(WssCerts { server_priv_key, certs })) } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 867f0ea301..379b30eeca 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2735,7 +2735,8 @@ struct OrdermatchContext { ordermatch_db: ConstructibleDb, } -pub fn init_ordermatch_context(ctx: &MmArc) -> OrdermatchInitResult<()> { +#[allow(unused)] +pub fn init_ordermatch_context(ctx: &MmArc, db_id: Option<&str>) -> OrdermatchInitResult<()> { // Helper #[derive(Deserialize)] struct CoinConf { @@ -2768,7 +2769,7 @@ pub fn init_ordermatch_context(ctx: &MmArc) -> OrdermatchInitResult<()> { orderbook_tickers, original_tickers, #[cfg(target_arch = "wasm32")] - ordermatch_db: ConstructibleDb::new(ctx, None), + ordermatch_db: ConstructibleDb::new(ctx, db_id), }; from_ctx(&ctx.ordermatch_ctx, move || Ok(ordermatch_context)) diff --git a/mm2src/mm2_main/src/lp_stats.rs b/mm2src/mm2_main/src/lp_stats.rs index ceca75b4df..21e5e08df0 100644 --- a/mm2src/mm2_main/src/lp_stats.rs +++ b/mm2src/mm2_main/src/lp_stats.rs @@ -1,8 +1,14 @@ /// The module is responsible for mm2 network stats collection /// +use crate::mm2::lp_network::{add_reserved_peer_addresses, lp_network_ports, request_peers, NetIdError, P2PRequest, + ParseAddressError, PeerDecodedResponse}; + +use coins::find_unique_account_ids_active; +#[cfg(not(target_arch = "wasm32"))] use common::async_blocking; use common::executor::{SpawnFuture, Timer}; use common::{log, HttpStatusCode}; use derive_more::Display; +use futures::future::try_join_all; use futures::lock::Mutex as AsyncMutex; use http::StatusCode; use mm2_core::mm_ctx::{from_ctx, MmArc}; @@ -10,11 +16,8 @@ use mm2_err_handle::prelude::*; use mm2_libp2p::{encode_message, NetworkInfo, PeerId, RelayAddress, RelayAddressError}; use serde_json::{self as json, Value as Json}; use std::collections::{HashMap, HashSet}; -use std::sync::Arc; - -use crate::mm2::lp_network::{add_reserved_peer_addresses, lp_network_ports, request_peers, NetIdError, P2PRequest, - ParseAddressError, PeerDecodedResponse}; use std::str::FromStr; 
+use std::sync::Arc; pub type NodeVersionResult = Result>; @@ -37,6 +40,8 @@ pub enum NodeVersionError { CurrentlyStopping, #[display(fmt = "start_version_stat_collection is not running")] NotRunning, + #[display(fmt = "Invalid request: {}", _0)] + InternalError(String), } impl HttpStatusCode for NodeVersionError { @@ -49,7 +54,9 @@ impl HttpStatusCode for NodeVersionError { | NodeVersionError::AlreadyRunning | NodeVersionError::CurrentlyStopping | NodeVersionError::NotRunning => StatusCode::METHOD_NOT_ALLOWED, - NodeVersionError::DatabaseError(_) => StatusCode::INTERNAL_SERVER_ERROR, + NodeVersionError::DatabaseError(_) | NodeVersionError::InternalError(_) => { + StatusCode::INTERNAL_SERVER_ERROR + }, } } } @@ -70,7 +77,7 @@ impl From for NodeVersionError { fn from(e: RelayAddressError) -> Self { NodeVersionError::InvalidAddress(e.to_string()) } } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone)] pub struct NodeInfo { pub name: String, pub address: String, @@ -124,9 +131,8 @@ fn delete_node_info_from_db(ctx: &MmArc, name: String, db_id: Option<&str>) -> R fn select_peers_addresses_from_db(_ctx: &MmArc) -> Result, String> { Ok(Vec::new()) } #[cfg(not(target_arch = "wasm32"))] -fn select_peers_addresses_from_db(ctx: &MmArc) -> Result, String> { - let _db_id: Option<&str> = None; // TODO - crate::mm2::database::stats_nodes::select_peers_addresses(ctx, None).map_err(|e| e.to_string()) +fn select_peers_addresses_from_db(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + crate::mm2::database::stats_nodes::select_peers_addresses(ctx, db_id).map_err(|e| e.to_string()) } #[cfg(target_arch = "wasm32")] @@ -154,8 +160,22 @@ pub async fn add_node_to_version_stat(ctx: MmArc, req: Json) -> NodeVersionResul peer_id: node_info.peer_id, }; - let _db_id: Option<&str> = None; // TODO - insert_node_info_to_db(&ctx, &node_info_with_ipv4_addr, None).map_to_mm(NodeVersionError::DatabaseError)?; + let db_ids = find_unique_account_ids_active(&ctx) + .await + .map_to_mm(NodeVersionError::InternalError)?; + let futures = db_ids + .iter() + .map(|db_id| { + let ctx = ctx.clone(); + let node_info_with_ipv4_addr = node_info_with_ipv4_addr.clone(); + let db_id = db_id.clone(); + async_blocking(move || { + insert_node_info_to_db(&ctx, &node_info_with_ipv4_addr, Some(&db_id)) + .map_to_mm(NodeVersionError::DatabaseError) + }) + }) + .collect::>(); + try_join_all(futures).await?; Ok("success".into()) } @@ -171,9 +191,22 @@ pub async fn remove_node_from_version_stat(_ctx: MmArc, _req: Json) -> NodeVersi #[cfg(not(target_arch = "wasm32"))] pub async fn remove_node_from_version_stat(ctx: MmArc, req: Json) -> NodeVersionResult { let node_name: String = json::from_value(req["name"].clone())?; - - let _db_id: Option<&str> = None; // TODO - delete_node_info_from_db(&ctx, node_name, None).map_to_mm(NodeVersionError::DatabaseError)?; + let db_ids = find_unique_account_ids_active(&ctx) + .await + .map_to_mm(NodeVersionError::InternalError)?; + + let futures = db_ids + .iter() + .map(|db_id| { + let ctx = ctx.clone(); + let node_name = node_name.clone(); + let db_id = db_id.clone(); + async_blocking(move || { + delete_node_info_from_db(&ctx, node_name, Some(&db_id)).map_to_mm(NodeVersionError::DatabaseError) + }) + }) + .collect::>(); + try_join_all(futures).await?; Ok("success".into()) } @@ -237,21 +270,10 @@ pub async fn start_version_stat_collection(_ctx: MmArc, _req: Json) -> NodeVersi #[cfg(not(target_arch = "wasm32"))] pub async fn start_version_stat_collection(ctx: MmArc, req: Json) -> 
NodeVersionResult { - let stats_ctx = StatsContext::from_ctx(&ctx).unwrap(); - { - let state = stats_ctx.status.lock().await; - if *state == StatsCollectionStatus::Stopping { - return MmError::err(NodeVersionError::CurrentlyStopping); - } - if *state != StatsCollectionStatus::Stopped { - return MmError::err(NodeVersionError::AlreadyRunning); - } - } - + let db_ids = find_unique_account_ids_active(&ctx) + .await + .map_to_mm(NodeVersionError::InternalError)?; let interval: f64 = json::from_value(req["interval"].clone())?; - - let peers_addresses = select_peers_addresses_from_db(&ctx).map_to_mm(NodeVersionError::DatabaseError)?; - let netid = ctx.conf["netid"].as_u64().unwrap_or(0) as u16; let network_info = if ctx.p2p_in_memory() { NetworkInfo::InMemory @@ -260,31 +282,46 @@ pub async fn start_version_stat_collection(ctx: MmArc, req: Json) -> NodeVersion NetworkInfo::Distributed { network_ports } }; - for (peer_id, address) in peers_addresses { - let peer_id = peer_id - .parse::() - .map_to_mm(|e| NodeVersionError::PeerIdParseError(peer_id, e.to_string()))?; + for db_id in db_ids { + let stats_ctx = StatsContext::from_ctx(&ctx).unwrap(); + { + let state = stats_ctx.status.lock().await; + if *state == StatsCollectionStatus::Stopping { + return MmError::err(NodeVersionError::CurrentlyStopping); + } + if *state != StatsCollectionStatus::Stopped { + return MmError::err(NodeVersionError::AlreadyRunning); + } + } + + let peers_addresses = + select_peers_addresses_from_db(&ctx, Some(&db_id)).map_to_mm(NodeVersionError::DatabaseError)?; - let relay_addr = RelayAddress::from_str(&address)?; - let multi_address = relay_addr.try_to_multiaddr(network_info)?; + for (peer_id, address) in peers_addresses { + let peer_id = peer_id + .parse::() + .map_to_mm(|e| NodeVersionError::PeerIdParseError(peer_id, e.to_string()))?; - let mut addresses = HashSet::new(); - addresses.insert(multi_address); - add_reserved_peer_addresses(&ctx, peer_id, addresses); - } + let relay_addr = RelayAddress::from_str(&address)?; + let multi_address = relay_addr.try_to_multiaddr(network_info)?; - let spawner = ctx.spawner(); - spawner.spawn(stat_collection_loop(ctx, interval)); + let mut addresses = HashSet::new(); + addresses.insert(multi_address); + add_reserved_peer_addresses(&ctx, peer_id, addresses); + } + + let spawner = ctx.spawner(); + spawner.spawn(stat_collection_loop(ctx.clone(), interval, db_id.to_owned())); + } Ok("success".into()) } #[cfg(not(target_arch = "wasm32"))] -async fn stat_collection_loop(ctx: MmArc, interval: f64) { +async fn stat_collection_loop(ctx: MmArc, interval: f64, db_id: String) { use common::now_sec; use crate::mm2::database::stats_nodes::select_peers_names; - let mut interval = interval; loop { if ctx.is_stopping() { @@ -308,8 +345,7 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { } } - let db_id: Option<&str> = None; // TODO - let peers_names = match select_peers_names(&ctx, db_id) { + let peers_names = match select_peers_names(&ctx, Some(&db_id)) { Ok(n) => n, Err(e) => { log::error!("Error selecting peers names from db: {}", e); @@ -350,8 +386,7 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { timestamp, error: None, }; - let db_id: Option<&str> = None; // TODO - if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { + if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, Some(&db_id)) { log::error!("Error inserting node {} version {} into db: {}", name, v, e); }; }, @@ -367,8 +402,7 @@ async fn stat_collection_loop(ctx: 
MmArc, interval: f64) { timestamp, error: Some(e.clone()), }; - let db_id: Option<&str> = None; // TODO - if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { + if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, Some(&db_id)) { log::error!("Error inserting node {} error into db: {}", name, e); }; }, @@ -380,8 +414,7 @@ async fn stat_collection_loop(ctx: MmArc, interval: f64) { timestamp, error: None, }; - let db_id: Option<&str> = None; // TODO - if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, db_id) { + if let Err(e) = insert_node_version_stat_to_db(&ctx, node_version_stat, Some(&db_id)) { log::error!("Error inserting no response for node {} into db: {}", name, e); }; }, From 795f718263964786de34f446f28bcb33a8275a41 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sun, 21 Apr 2024 05:11:56 +0100 Subject: [PATCH 061/186] fix find_unique_account_ids todos --- mm2src/coins/lp_coins.rs | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 3d86ded9c6..4d1204d5c1 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -139,7 +139,7 @@ macro_rules! try_tx_fus { return Box::new(futures01::future::err(crate::TransactionErr::TxRecoverable( TransactionEnum::from($tx), ERRL!("{:?}", err), - ))) + ))); }, } }; @@ -156,7 +156,7 @@ macro_rules! try_tx_s { file!(), line!(), err - ))) + ))); }, } }; @@ -167,7 +167,7 @@ macro_rules! try_tx_s { return Err(crate::TransactionErr::TxRecoverable( TransactionEnum::from($tx), format!("{}:{}] {:?}", file!(), line!(), err), - )) + )); }, } }; @@ -208,6 +208,7 @@ pub mod lp_price; pub mod watcher_common; pub mod coin_errors; + use coin_errors::{MyAddressError, ValidatePaymentError, ValidatePaymentFut}; #[doc(hidden)] @@ -215,6 +216,7 @@ use coin_errors::{MyAddressError, ValidatePaymentError, ValidatePaymentFut}; pub mod coins_tests; pub mod eth; + use eth::GetValidEthWithdrawAddError; use eth::{eth_coin_from_conf_and_request, get_eth_address, EthCoin, EthGasDetailsErr, EthTxFeeDetails, GetEthAddressError, SignedEthTx}; @@ -224,6 +226,7 @@ pub mod hd_confirm_address; pub mod hd_pubkey; pub mod hd_wallet; + use hd_wallet::{HDAccountAddressId, HDAddress}; pub mod hd_wallet_storage; @@ -232,9 +235,11 @@ pub mod hd_wallet_storage; pub mod my_tx_history_v2; pub mod qrc20; + use qrc20::{qrc20_coin_with_policy, Qrc20ActivationParams, Qrc20Coin, Qrc20FeeDetails}; pub mod rpc_command; + use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskManagerShared}, init_account_balance::{AccountBalanceTaskManager, AccountBalanceTaskManagerShared}, init_create_account::{CreateAccountTaskManager, CreateAccountTaskManagerShared}, @@ -242,12 +247,14 @@ use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskM init_withdraw::{WithdrawTaskManager, WithdrawTaskManagerShared}}; pub mod tendermint; + use tendermint::{CosmosTransaction, CustomTendermintMsgType, TendermintCoin, TendermintFeeDetails, TendermintProtocolInfo, TendermintToken, TendermintTokenProtocolInfo}; #[doc(hidden)] #[allow(unused_variables)] pub mod test_coin; + pub use test_coin::TestCoin; pub mod tx_history_storage; @@ -261,6 +268,7 @@ pub mod tx_history_storage; not(target_arch = "wasm32") ))] pub mod solana; + #[cfg(all( feature = "enable-solana", not(target_os = "ios"), @@ -277,6 +285,7 @@ pub use solana::spl::SplToken; pub use solana::{SolanaActivationParams, SolanaCoin, 
SolanaFeeDetails}; pub mod utxo; + use utxo::bch::{bch_coin_with_policy, BchActivationRequest, BchCoin}; use utxo::qtum::{self, qtum_coin_with_policy, Qrc20AddressError, QtumCoin, QtumDelegationOps, QtumDelegationRequest, QtumStakingInfosDetails, ScriptHashTypeNotSupported}; @@ -289,10 +298,12 @@ use utxo::UtxoActivationParams; use utxo::{BlockchainNetwork, GenerateTxError, UtxoFeeDetails, UtxoTx}; pub mod nft; + use nft::nft_errors::GetNftInfoError; use script::Script; pub mod z_coin; + use crate::coin_errors::ValidatePaymentResult; use crate::utxo::swap_proto_v2_scripts; use crate::utxo::utxo_common::{payment_script, WaitForOutputSpendErr}; @@ -4096,11 +4107,11 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result return ERR!("Lightning protocol is not supported by lp_coininit"), #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA => { - return ERR!("Solana protocol is not supported by lp_coininit - use enable_solana_with_tokens instead") + return ERR!("Solana protocol is not supported by lp_coininit - use enable_solana_with_tokens instead"); }, #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SPLTOKEN { .. } => { - return ERR!("SplToken protocol is not supported by lp_coininit - use enable_spl instead") + return ERR!("SplToken protocol is not supported by lp_coininit - use enable_spl instead"); }, }; @@ -4144,7 +4155,7 @@ pub async fn lp_register_coin( let mut coins = cctx.coins.lock().await; match coins.raw_entry_mut().from_key(&ticker) { RawEntryMut::Occupied(_oe) => { - return MmError::err(RegisterCoinError::CoinIsInitializedAlready { coin: ticker.clone() }) + return MmError::err(RegisterCoinError::CoinIsInitializedAlready { coin: ticker.clone() }); }, RawEntryMut::Vacant(ve) => ve.insert(ticker.clone(), MmCoinStruct::new(coin.clone())), }; @@ -4197,19 +4208,16 @@ pub async fn find_unique_account_ids_active(ctx: &MmArc) -> Result Result, String> { // Using a HashSet to ensure uniqueness efficiently let mut account_ids = HashSet::new(); - // Add default wallet pubkey + // Add default wallet pubkey as coin.account_db_id() will return None by default account_ids.insert(ctx.rmd160_hex()); let cctx = try_s!(CoinsContext::from_ctx(ctx)); let coins = cctx.coins.lock().await; let coins = coins.values().collect::>(); - #[cfg(not(target_arch = "wasm32"))] for coin in coins.iter() { if let Some(account) = coin.inner.account_db_id() { if active_only && coin.is_available() { @@ -4224,7 +4232,6 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result Delega _ => { return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), - }) + }); }, } } @@ -4374,7 +4381,7 @@ pub async fn get_staking_infos(ctx: MmArc, req: GetStakingInfosRequest) -> Staki _ => { return MmError::err(StakingInfosError::CoinDoesntSupportStakingInfos { coin: coin.ticker().to_string(), - }) + }); }, } } @@ -4387,7 +4394,7 @@ pub async fn add_delegation(ctx: MmArc, req: AddDelegateRequest) -> DelegationRe _ => { return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), - }) + }); }, }; match req.staking_details { From 88754f7955ce400f6e69aef5a3486cb800118ed5 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 22 Apr 2024 08:09:37 +0100 Subject: [PATCH 062/186] minor changes --- .../utxo/utxo_block_header_storage/mod.rs | 12 ++-- .../utxo/utxo_builder/utxo_coin_builder.rs | 60 +++++++++---------- mm2src/mm2_core/src/mm_ctx.rs | 37 
++++++------ 3 files changed, 56 insertions(+), 53 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 11b5bfa751..b508570096 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -1,9 +1,11 @@ -#[cfg(not(target_arch = "wasm32"))] mod sql_block_header_storage; +#[cfg(not(target_arch = "wasm32"))] +mod sql_block_header_storage; #[cfg(not(target_arch = "wasm32"))] pub use sql_block_header_storage::SqliteBlockHeadersStorage; -#[cfg(target_arch = "wasm32")] mod wasm; +#[cfg(target_arch = "wasm32")] +mod wasm; #[cfg(target_arch = "wasm32")] pub use wasm::IDBBlockHeadersStorage; @@ -29,7 +31,7 @@ impl Debug for BlockHeaderStorage { impl BlockHeaderStorage { #[cfg(all(not(test), not(target_arch = "wasm32")))] pub(crate) fn new_from_ctx( - ctx: MmArc, + ctx: &MmArc, ticker: String, db_id: Option<&str>, ) -> Result { @@ -47,7 +49,7 @@ impl BlockHeaderStorage { #[cfg(target_arch = "wasm32")] pub(crate) fn new_from_ctx( - ctx: MmArc, + ctx: &MmArc, ticker: String, db_id: Option<&str>, ) -> Result { @@ -58,7 +60,7 @@ impl BlockHeaderStorage { #[cfg(all(test, not(target_arch = "wasm32")))] pub(crate) fn new_from_ctx( - _ctx: MmArc, + _ctx: &MmArc, ticker: String, _db_id: Option<&str>, ) -> Result { diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index c4dfa9dcd2..af3d45ea80 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -63,9 +63,9 @@ pub enum UtxoCoinBuildError { ErrorDetectingDecimals(String), InvalidBlockchainNetwork(String), #[display( - fmt = "Failed to connect to at least 1 of {:?} in {} seconds.", - electrum_servers, - seconds + fmt = "Failed to connect to at least 1 of {:?} in {} seconds.", + electrum_servers, + seconds )] FailedToConnectToElectrums { electrum_servers: Vec, @@ -80,7 +80,7 @@ pub enum UtxoCoinBuildError { HwContextNotInitialized, HDWalletStorageError(HDWalletStorageError), #[display( - fmt = "Coin doesn't support Trezor hardware wallet. Please consider adding the 'trezor_coin' field to the coins config" + fmt = "Coin doesn't support Trezor hardware wallet. 
Please consider adding the 'trezor_coin' field to the coins config" )] CoinDoesntSupportTrezor, BlockHeaderStorageError(BlockHeaderStorageError), @@ -134,7 +134,7 @@ impl From for UtxoCoinBuildError { #[async_trait] pub trait UtxoCoinBuilder: - UtxoFieldsWithIguanaSecretBuilder + UtxoFieldsWithGlobalHDBuilder + UtxoFieldsWithHardwareWalletBuilder +UtxoFieldsWithIguanaSecretBuilder + UtxoFieldsWithGlobalHDBuilder + UtxoFieldsWithHardwareWalletBuilder { type ResultCoin; type Error: NotMmError; @@ -148,7 +148,7 @@ pub trait UtxoCoinBuilder: PrivKeyBuildPolicy::IguanaPrivKey(priv_key) => self.build_utxo_fields_with_iguana_secret(priv_key).await, PrivKeyBuildPolicy::GlobalHDAccount(global_hd_ctx) => { self.build_utxo_fields_with_global_hd(global_hd_ctx).await - }, + } PrivKeyBuildPolicy::Trezor => self.build_utxo_fields_with_trezor().await, } } @@ -229,8 +229,8 @@ async fn build_utxo_coin_fields_with_conf_and_policy( conf: UtxoCoinConf, priv_key_policy: PrivKeyPolicy, ) -> UtxoCoinBuildResult -where - Builder: UtxoCoinBuilderCommonOps + Sync + ?Sized, + where + Builder: UtxoCoinBuilderCommonOps + Sync + ?Sized, { let key_pair = priv_key_policy.activated_key_or_err()?; let addr_format = builder.address_format()?; @@ -241,9 +241,9 @@ where conf.address_prefixes.clone(), conf.bech32_hrp.clone(), ) - .as_pkh() - .build() - .map_to_mm(UtxoCoinBuildError::Internal)?; + .as_pkh() + .build() + .map_to_mm(UtxoCoinBuildError::Internal)?; let my_script_pubkey = output_script(&my_address).map(|script| script.to_bytes())?; let derivation_method = DerivationMethod::SingleAddress(my_address); @@ -440,7 +440,7 @@ pub trait UtxoCoinBuilderCommonOps { } else { from_req } - }, + } None => format_from_conf, }; @@ -504,7 +504,7 @@ pub trait UtxoCoinBuilderCommonOps { .map_to_mm(UtxoCoinBuildError::ErrorDetectingFeeMethod)?, }; TxFee::Dynamic(fee_method) - }, + } Some(fee) => TxFee::FixedPerKb(fee), }; Ok(tx_fee) @@ -534,7 +534,7 @@ pub trait UtxoCoinBuilderCommonOps { let native = self.native_client()?; Ok(UtxoRpcClientEnum::Native(native)) } - }, + } UtxoRpcMode::Electrum { servers } => { let electrum = self .electrum_client( @@ -545,7 +545,7 @@ pub trait UtxoCoinBuilderCommonOps { ) .await?; Ok(UtxoRpcClientEnum::Electrum(electrum)) - }, + } } } @@ -581,7 +581,7 @@ pub trait UtxoCoinBuilderCommonOps { }; let storage_ticker = self.ticker().replace('-', "_"); let block_headers_storage = - BlockHeaderStorage::new_from_ctx(self.ctx().clone(), storage_ticker, db_id.as_deref()) + BlockHeaderStorage::new_from_ctx(self.ctx(), storage_ticker, db_id.as_deref()) .map_to_mm(|e| UtxoCoinBuildError::Internal(e.to_string()))?; if !block_headers_storage.is_initialized_for().await? 
{ block_headers_storage.init().await?; @@ -690,14 +690,14 @@ pub trait UtxoCoinBuilderCommonOps { .as_str() .or_mm_err(|| UtxoConfError::CurrencyNameIsNotSet)?; (name, false) - }, + } } }; let data_dir = coin_daemon_data_dir(name, is_asset_chain); let confname = format!("{}.conf", name); return Ok(data_dir.join(&confname[..])); - }, + } }; let (confpath, rel_to_home) = match declared_confpath.strip_prefix("~/") { @@ -829,8 +829,8 @@ fn read_native_mode_conf( "Error parsing the native wallet configuration '{}': {}", filename.as_ref().display(), err - ) - }, + ); + } }; let rpc_port = match read_property(&conf, network, "rpcport") { Some(port) => port.parse::().ok(), @@ -888,12 +888,12 @@ fn spawn_electrum_version_loop( match event { ElectrumProtoVerifierEvent::Connected(electrum_addr) => { check_electrum_server_version(weak_client.clone(), client_name.clone(), electrum_addr).await - }, + } ElectrumProtoVerifierEvent::Disconnected(electrum_addr) => { if let Some(client) = weak_client.upgrade() { client.reset_protocol_version(&electrum_addr).await.error_log(); } - }, + } } } }; @@ -928,7 +928,7 @@ async fn check_electrum_server_version( remove_server(client, &electrum_addr).await; }; return; - }, + } }; // check if the version is allowed @@ -938,7 +938,7 @@ async fn check_electrum_server_version( error!("Error on parse protocol_version: {:?}", e); remove_server(client, &electrum_addr).await; return; - }, + } }; if !available_protocols.contains(&actual_version) { @@ -976,10 +976,10 @@ async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Resul } Retry(()) }) - .repeat_every_secs(0.5) - .attempts(10) - .await - .map_err(|_exceed| ERRL!("Failed protocol version verifying of at least 1 of Electrums in 5 seconds.")) - // Flatten `Result< Result<(), String>, String >` - .flatten() + .repeat_every_secs(0.5) + .attempts(10) + .await + .map_err(|_exceed| ERRL!("Failed protocol version verifying of at least 1 of Electrums in 5 seconds.")) + // Flatten `Result< Result<(), String>, String >` + .flatten() } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index f1174076ec..e1dcf4339a 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -40,6 +40,7 @@ cfg_native! { /// Default interval to export and record metrics to log. const EXPORT_METRICS_INTERVAL: f64 = 5. * 60.; pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; +pub const SYNC_SQLITE_DB_ID: &str = "MM2.db"; #[cfg(not(target_arch = "wasm32"))] pub type AsyncSqliteConnectionArc = Arc>; @@ -227,7 +228,7 @@ impl MmCtx { rpcport ) })? - }, + } None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -242,7 +243,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -262,7 +263,7 @@ impl MmCtx { return ERR!("IP address {} must be specified", ip); } Ok(()) - }, + } Ok(ServerName::DnsName(_)) => Ok(()), // NOTE: We need to have this wild card since `ServerName` is a non_exhaustive enum. 
Ok(_) => ERR!("Only IpAddress and DnsName are allowed in `alt_names`"), @@ -358,7 +359,7 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub fn init_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.dbdir(db_id).join("MM2.db"); + let sqlite_file_path = self.dbdir(db_id).join(SYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let connection = try_s!(Connection::open(sqlite_file_path)); @@ -401,7 +402,7 @@ impl MmCtx { return if let Some(connection) = connections.get(&db_id) { Some(connection.clone()) } else { - let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); + let sqlite_file_path = self.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let connection = Arc::new(Mutex::new( @@ -428,7 +429,7 @@ impl MmCtx { return if let Some(connection) = connections.get(&db_id) { connection.clone() } else { - let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); + let sqlite_file_path = self.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let connection = Arc::new(Mutex::new( @@ -452,7 +453,7 @@ impl MmCtx { if let Some(connection) = connections.get(&db_id) { Ok(connection.clone()) } else { - let sqlite_file_path = self.dbdir(Some(&db_id)).join("MM2.db"); + let sqlite_file_path = self.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let connection = Arc::new(Mutex::new(try_s!(Connection::open(sqlite_file_path)))); @@ -612,7 +613,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. Stop the loop"); break; - }, + } } } }; @@ -646,7 +647,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - }, + } } } } @@ -743,8 +744,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -752,8 +753,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -767,9 +768,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> -where - C: FnOnce() -> Result, - T: 'static + Send + Sync, + where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -862,9 +863,9 @@ pub fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - }, + } Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - }, + } } } From cc3a02fe49dee08e3492347ab87c860580483d7d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 22 Apr 2024 08:15:06 +0100 Subject: [PATCH 063/186] cargo fmt --- .../utxo/utxo_block_header_storage/mod.rs | 6 +- .../utxo/utxo_builder/utxo_coin_builder.rs | 61 +++++++++---------- mm2src/mm2_core/src/mm_ctx.rs | 28 ++++----- 3 files changed, 46 insertions(+), 49 deletions(-) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index b508570096..3669a5432a 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ 
b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -1,11 +1,9 @@ -#[cfg(not(target_arch = "wasm32"))] -mod sql_block_header_storage; +#[cfg(not(target_arch = "wasm32"))] mod sql_block_header_storage; #[cfg(not(target_arch = "wasm32"))] pub use sql_block_header_storage::SqliteBlockHeadersStorage; -#[cfg(target_arch = "wasm32")] -mod wasm; +#[cfg(target_arch = "wasm32")] mod wasm; #[cfg(target_arch = "wasm32")] pub use wasm::IDBBlockHeadersStorage; diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index af3d45ea80..adf806044f 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -63,9 +63,9 @@ pub enum UtxoCoinBuildError { ErrorDetectingDecimals(String), InvalidBlockchainNetwork(String), #[display( - fmt = "Failed to connect to at least 1 of {:?} in {} seconds.", - electrum_servers, - seconds + fmt = "Failed to connect to at least 1 of {:?} in {} seconds.", + electrum_servers, + seconds )] FailedToConnectToElectrums { electrum_servers: Vec, @@ -80,7 +80,7 @@ pub enum UtxoCoinBuildError { HwContextNotInitialized, HDWalletStorageError(HDWalletStorageError), #[display( - fmt = "Coin doesn't support Trezor hardware wallet. Please consider adding the 'trezor_coin' field to the coins config" + fmt = "Coin doesn't support Trezor hardware wallet. Please consider adding the 'trezor_coin' field to the coins config" )] CoinDoesntSupportTrezor, BlockHeaderStorageError(BlockHeaderStorageError), @@ -134,7 +134,7 @@ impl From for UtxoCoinBuildError { #[async_trait] pub trait UtxoCoinBuilder: -UtxoFieldsWithIguanaSecretBuilder + UtxoFieldsWithGlobalHDBuilder + UtxoFieldsWithHardwareWalletBuilder + UtxoFieldsWithIguanaSecretBuilder + UtxoFieldsWithGlobalHDBuilder + UtxoFieldsWithHardwareWalletBuilder { type ResultCoin; type Error: NotMmError; @@ -148,7 +148,7 @@ UtxoFieldsWithIguanaSecretBuilder + UtxoFieldsWithGlobalHDBuilder + UtxoFieldsWi PrivKeyBuildPolicy::IguanaPrivKey(priv_key) => self.build_utxo_fields_with_iguana_secret(priv_key).await, PrivKeyBuildPolicy::GlobalHDAccount(global_hd_ctx) => { self.build_utxo_fields_with_global_hd(global_hd_ctx).await - } + }, PrivKeyBuildPolicy::Trezor => self.build_utxo_fields_with_trezor().await, } } @@ -229,8 +229,8 @@ async fn build_utxo_coin_fields_with_conf_and_policy( conf: UtxoCoinConf, priv_key_policy: PrivKeyPolicy, ) -> UtxoCoinBuildResult - where - Builder: UtxoCoinBuilderCommonOps + Sync + ?Sized, +where + Builder: UtxoCoinBuilderCommonOps + Sync + ?Sized, { let key_pair = priv_key_policy.activated_key_or_err()?; let addr_format = builder.address_format()?; @@ -241,9 +241,9 @@ async fn build_utxo_coin_fields_with_conf_and_policy( conf.address_prefixes.clone(), conf.bech32_hrp.clone(), ) - .as_pkh() - .build() - .map_to_mm(UtxoCoinBuildError::Internal)?; + .as_pkh() + .build() + .map_to_mm(UtxoCoinBuildError::Internal)?; let my_script_pubkey = output_script(&my_address).map(|script| script.to_bytes())?; let derivation_method = DerivationMethod::SingleAddress(my_address); @@ -440,7 +440,7 @@ pub trait UtxoCoinBuilderCommonOps { } else { from_req } - } + }, None => format_from_conf, }; @@ -504,7 +504,7 @@ pub trait UtxoCoinBuilderCommonOps { .map_to_mm(UtxoCoinBuildError::ErrorDetectingFeeMethod)?, }; TxFee::Dynamic(fee_method) - } + }, Some(fee) => TxFee::FixedPerKb(fee), }; Ok(tx_fee) @@ -534,7 +534,7 @@ pub trait UtxoCoinBuilderCommonOps { let native = self.native_client()?; 
Ok(UtxoRpcClientEnum::Native(native)) } - } + }, UtxoRpcMode::Electrum { servers } => { let electrum = self .electrum_client( @@ -545,7 +545,7 @@ pub trait UtxoCoinBuilderCommonOps { ) .await?; Ok(UtxoRpcClientEnum::Electrum(electrum)) - } + }, } } @@ -580,9 +580,8 @@ pub trait UtxoCoinBuilderCommonOps { None => None, }; let storage_ticker = self.ticker().replace('-', "_"); - let block_headers_storage = - BlockHeaderStorage::new_from_ctx(self.ctx(), storage_ticker, db_id.as_deref()) - .map_to_mm(|e| UtxoCoinBuildError::Internal(e.to_string()))?; + let block_headers_storage = BlockHeaderStorage::new_from_ctx(self.ctx(), storage_ticker, db_id.as_deref()) + .map_to_mm(|e| UtxoCoinBuildError::Internal(e.to_string()))?; if !block_headers_storage.is_initialized_for().await? { block_headers_storage.init().await?; } @@ -690,14 +689,14 @@ pub trait UtxoCoinBuilderCommonOps { .as_str() .or_mm_err(|| UtxoConfError::CurrencyNameIsNotSet)?; (name, false) - } + }, } }; let data_dir = coin_daemon_data_dir(name, is_asset_chain); let confname = format!("{}.conf", name); return Ok(data_dir.join(&confname[..])); - } + }, }; let (confpath, rel_to_home) = match declared_confpath.strip_prefix("~/") { @@ -830,7 +829,7 @@ fn read_native_mode_conf( filename.as_ref().display(), err ); - } + }, }; let rpc_port = match read_property(&conf, network, "rpcport") { Some(port) => port.parse::().ok(), @@ -888,12 +887,12 @@ fn spawn_electrum_version_loop( match event { ElectrumProtoVerifierEvent::Connected(electrum_addr) => { check_electrum_server_version(weak_client.clone(), client_name.clone(), electrum_addr).await - } + }, ElectrumProtoVerifierEvent::Disconnected(electrum_addr) => { if let Some(client) = weak_client.upgrade() { client.reset_protocol_version(&electrum_addr).await.error_log(); } - } + }, } } }; @@ -928,7 +927,7 @@ async fn check_electrum_server_version( remove_server(client, &electrum_addr).await; }; return; - } + }, }; // check if the version is allowed @@ -938,7 +937,7 @@ async fn check_electrum_server_version( error!("Error on parse protocol_version: {:?}", e); remove_server(client, &electrum_addr).await; return; - } + }, }; if !available_protocols.contains(&actual_version) { @@ -976,10 +975,10 @@ async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Resul } Retry(()) }) - .repeat_every_secs(0.5) - .attempts(10) - .await - .map_err(|_exceed| ERRL!("Failed protocol version verifying of at least 1 of Electrums in 5 seconds.")) - // Flatten `Result< Result<(), String>, String >` - .flatten() + .repeat_every_secs(0.5) + .attempts(10) + .await + .map_err(|_exceed| ERRL!("Failed protocol version verifying of at least 1 of Electrums in 5 seconds.")) + // Flatten `Result< Result<(), String>, String >` + .flatten() } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index e1dcf4339a..d6298ef0ba 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -228,7 +228,7 @@ impl MmCtx { rpcport ) })? - } + }, None => 7783, // Default port if `rpcport` does not exist in the config }; if port < 1000 { @@ -243,7 +243,7 @@ impl MmCtx { } else { "127.0.0.1" } - .to_string(); + .to_string(); let ip: IpAddr = try_s!(rpcip.parse()); Ok(SocketAddr::new(ip, port as u16)) } @@ -263,7 +263,7 @@ impl MmCtx { return ERR!("IP address {} must be specified", ip); } Ok(()) - } + }, Ok(ServerName::DnsName(_)) => Ok(()), // NOTE: We need to have this wild card since `ServerName` is a non_exhaustive enum. 
Ok(_) => ERR!("Only IpAddress and DnsName are allowed in `alt_names`"), @@ -613,7 +613,7 @@ impl MmArc { None => { log::info!("MmCtx was dropped. Stop the loop"); break; - } + }, } } }; @@ -647,7 +647,7 @@ impl MmArc { ve.insert(self.weak()); try_s!(self.ffi_handle.pin(rid)); return Ok(rid); - } + }, } } } @@ -744,8 +744,8 @@ impl MmFutSpawner { impl SpawnFuture for MmFutSpawner { fn spawn(&self, f: F) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn(f) } @@ -753,8 +753,8 @@ impl SpawnFuture for MmFutSpawner { impl SpawnAbortable for MmFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future + Send + 'static, + where + F: Future + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -768,9 +768,9 @@ pub fn from_ctx( ctx_field: &Mutex>>, constructor: C, ) -> Result, String> - where - C: FnOnce() -> Result, - T: 'static + Send + Sync, +where + C: FnOnce() -> Result, + T: 'static + Send + Sync, { let mut ctx_field = try_s!(ctx_field.lock()); if let Some(ref ctx) = *ctx_field { @@ -863,9 +863,9 @@ pub fn log_sqlite_file_open_attempt(sqlite_file_path: &Path) { match sqlite_file_path.canonicalize() { Ok(absolute_path) => { log::debug!("Trying to open SQLite database file {}", absolute_path.display()); - } + }, Err(_) => { log::debug!("Trying to open SQLite database file {}", sqlite_file_path.display()); - } + }, } } From ee8d9d0659bf30d628ca9bc2390b99fbfb50bba7 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 23 Apr 2024 07:38:52 +0100 Subject: [PATCH 064/186] wip changes --- .../coins/hd_wallet_storage/wasm_storage.rs | 22 +- mm2src/coins/lp_coins.rs | 2 +- mm2src/coins/nft.rs | 235 +++++++++++------- mm2src/coins/nft/nft_structs.rs | 31 +-- mm2src/coins/nft/nft_tests.rs | 52 ++-- mm2src/coins/nft/storage/db_test_helpers.rs | 2 +- .../wasm/tx_history_storage_v2.rs | 4 +- .../utxo/utxo_block_header_storage/mod.rs | 12 +- .../wasm/indexeddb_block_header_storage.rs | 2 +- .../storage/blockdb/blockdb_idb_storage.rs | 2 +- .../z_coin/storage/walletdb/wasm/storage.rs | 2 +- .../z_coin/storage/z_params/indexeddb.rs | 2 +- mm2src/mm2_db/src/indexed_db/db_lock.rs | 33 ++- .../src/account/storage/wasm_storage.rs | 2 +- mm2src/mm2_main/src/lp_ordermatch.rs | 3 +- mm2src/mm2_main/src/lp_swap.rs | 3 +- mm2src/mm2_main/src/lp_wallet.rs | 5 +- 17 files changed, 242 insertions(+), 172 deletions(-) diff --git a/mm2src/coins/hd_wallet_storage/wasm_storage.rs b/mm2src/coins/hd_wallet_storage/wasm_storage.rs index d25363a854..5a2787620f 100644 --- a/mm2src/coins/hd_wallet_storage/wasm_storage.rs +++ b/mm2src/coins/hd_wallet_storage/wasm_storage.rs @@ -53,17 +53,17 @@ impl From for HDWalletStorageError { let stringified_error = e.to_string(); match e { // We don't expect that the `String` and `u32` types serialization to fail. - CursorError::ErrorSerializingIndexFieldValue {..} + CursorError::ErrorSerializingIndexFieldValue { .. } // We don't expect that the `String` and `u32` types deserialization to fail. - | CursorError::ErrorDeserializingIndexValue {..} - | CursorError::ErrorOpeningCursor {..} - | CursorError::AdvanceError {..} - | CursorError::InvalidKeyRange {..} - | CursorError::TypeMismatch {..} - | CursorError::IncorrectNumberOfKeysPerIndex {..} + | CursorError::ErrorDeserializingIndexValue { .. } + | CursorError::ErrorOpeningCursor { .. } + | CursorError::AdvanceError { .. } + | CursorError::InvalidKeyRange { .. } + | CursorError::TypeMismatch { .. 
} + | CursorError::IncorrectNumberOfKeysPerIndex { .. } | CursorError::UnexpectedState(..) - | CursorError::IncorrectUsage {..} => HDWalletStorageError::Internal(stringified_error), - CursorError::ErrorDeserializingItem {..} => HDWalletStorageError::ErrorDeserializing(stringified_error), + | CursorError::IncorrectUsage { .. } => HDWalletStorageError::Internal(stringified_error), + CursorError::ErrorDeserializingItem { .. } => HDWalletStorageError::ErrorDeserializing(stringified_error), } } } @@ -271,7 +271,7 @@ impl HDWalletIndexedDbStorage { } async fn lock_db_mutex(db: &SharedDb) -> HDWalletStorageResult> { - db.get_or_initialize().await.mm_err(HDWalletStorageError::from) + db.get_or_initialize(None).await.mm_err(HDWalletStorageError::from) } async fn find_account( @@ -316,7 +316,7 @@ impl HDWalletIndexedDbStorage { /// This function is used in `hd_wallet_storage::tests`. pub(super) async fn get_all_storage_items(ctx: &MmArc) -> Vec { let coins_ctx = CoinsContext::from_ctx(ctx).unwrap(); - let db = coins_ctx.hd_wallet_db.get_or_initialize().await.unwrap(); + let db = coins_ctx.hd_wallet_db.get_or_initialize(None).await.unwrap(); let transaction = db.inner.transaction().await.unwrap(); let table = transaction.table::().await.unwrap(); table diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index d7f8e4d964..5e9435aed7 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3621,7 +3621,7 @@ impl CoinsContext { #[cfg(target_arch = "wasm32")] async fn tx_history_db(&self) -> TxHistoryResult> { - Ok(self.tx_history_db.get_or_initialize().await?) + Ok(self.tx_history_db.get_or_initialize(None).await?) } } diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index ff2c0c968c..83fc530d2d 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -93,7 +93,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult let req = req.clone(); let res = async move { - let nft_ctx = NftCtx::from_ctx(&ctx_clone, Some(&id)).map_to_mm(GetNftInfoError::Internal)?; + let nft_ctx = NftCtx::from_ctx(&ctx_clone).map_to_mm(GetNftInfoError::Internal)?; let chains = req .chains @@ -101,7 +101,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult .into_iter() .filter(|c| chains.contains(c)) .collect::>(); - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(Some(&id)).await?; for chain in req.chains.iter() { if !NftListStorageOps::is_initialized(&storage, chain).await? { NftListStorageOps::init(&storage, chain).await?; @@ -144,11 +144,13 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult /// token ID, and chain, and returns comprehensive information about the NFT. /// It also checks and redacts potential spam if `protect_from_spam` in the request is set to true. pub async fn get_nft_metadata(ctx: MmArc, req: NftMetadataReq) -> MmResult { - // TODO: db_id - let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; + let db_id = find_nft_account_id_for_chain(&ctx, req.chain) + .await + .map_to_mm(GetNftInfoError::Internal)?; + + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(db_id.map(|(key, _)| key).as_deref()).await?; if !NftListStorageOps::is_initialized(&storage, &req.chain).await? 
{ NftListStorageOps::init(&storage, &req.chain).await?; } @@ -162,6 +164,7 @@ pub async fn get_nft_metadata(ctx: MmArc, req: NftMetadataReq) -> MmResult MmResult<(), UpdateNftError> { - // TODO: db_id - let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; + let db_ids = find_unique_nft_account_ids(&ctx, req.chains.clone()) + .await + .map_to_mm(UpdateNftError::Internal)?; - let storage = nft_ctx.lock_db().await?; - for chain in req.chains.iter() { - let transfer_history_initialized = NftTransferHistoryStorageOps::is_initialized(&storage, chain).await?; + let futures = + |db_id: String, chains: Vec| -> Pin> + Send>> { + let ctx = ctx.clone(); + let req = req.clone(); + Box::pin(async move { + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; + + let storage = nft_ctx.lock_db(Some(&db_id)).await?; + for chain in chains.iter() { + let transfer_history_initialized = + NftTransferHistoryStorageOps::is_initialized(&storage, chain).await?; + + let from_block = if transfer_history_initialized { + let last_transfer_block = + NftTransferHistoryStorageOps::get_last_block_number(&storage, chain).await?; + last_transfer_block.map(|b| b + 1) + } else { + NftTransferHistoryStorageOps::init(&storage, chain).await?; + None + }; + let coin_enum = lp_coinfind_or_err(&ctx, chain.to_ticker()).await?; + let eth_coin = match coin_enum { + MmCoinEnum::EthCoin(eth_coin) => eth_coin, + _ => { + return MmError::err(UpdateNftError::CoinDoesntSupportNft { + coin: coin_enum.ticker().to_owned(), + }); + }, + }; + let nft_transfers = get_moralis_nft_transfers(&ctx, chain, from_block, &req.url, eth_coin).await?; + storage.add_transfers_to_history(*chain, nft_transfers).await?; + + let nft_block = match NftListStorageOps::get_last_block_number(&storage, chain).await { + Ok(Some(block)) => block, + Ok(None) => { + // if there are no rows in NFT LIST table we can try to get nft list from moralis. + let nft_list = + cache_nfts_from_moralis(&ctx, &storage, chain, &req.url, &req.url_antispam).await?; + update_meta_in_transfers(&storage, chain, nft_list).await?; + update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; + update_spam(&storage, *chain, &req.url_antispam).await?; + update_phishing(&storage, chain, &req.url_antispam).await?; + continue; + }, + Err(_) => { + // if there is an error, then NFT LIST table doesnt exist, so we need to cache nft list from moralis. + NftListStorageOps::init(&storage, chain).await?; + let nft_list = + cache_nfts_from_moralis(&ctx, &storage, chain, &req.url, &req.url_antispam).await?; + update_meta_in_transfers(&storage, chain, nft_list).await?; + update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; + update_spam(&storage, *chain, &req.url_antispam).await?; + update_phishing(&storage, chain, &req.url_antispam).await?; + continue; + }, + }; + let scanned_block = storage.get_last_scanned_block(chain).await?.ok_or_else(|| { + UpdateNftError::LastScannedBlockNotFound { + last_nft_block: nft_block.to_string(), + } + })?; + // if both block numbers exist, last scanned block should be equal + // or higher than last block number from NFT LIST table. 
+ if scanned_block < nft_block { + return MmError::err(UpdateNftError::InvalidBlockOrder { + last_scanned_block: scanned_block.to_string(), + last_nft_block: nft_block.to_string(), + }); + } + update_nft_list( + ctx.clone(), + &storage, + chain, + scanned_block + 1, + &req.url, + &req.url_antispam, + ) + .await?; + update_nft_global_in_coins_ctx(&ctx, &storage, *chain).await?; + update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; + update_spam(&storage, *chain, &req.url_antispam).await?; + update_phishing(&storage, chain, &req.url_antispam).await?; + } - let from_block = if transfer_history_initialized { - let last_transfer_block = NftTransferHistoryStorageOps::get_last_block_number(&storage, chain).await?; - last_transfer_block.map(|b| b + 1) - } else { - NftTransferHistoryStorageOps::init(&storage, chain).await?; - None - }; - let coin_enum = lp_coinfind_or_err(&ctx, chain.to_ticker()).await?; - let eth_coin = match coin_enum { - MmCoinEnum::EthCoin(eth_coin) => eth_coin, - _ => { - return MmError::err(UpdateNftError::CoinDoesntSupportNft { - coin: coin_enum.ticker().to_owned(), - }); - }, - }; - let nft_transfers = get_moralis_nft_transfers(&ctx, chain, from_block, &req.url, eth_coin).await?; - storage.add_transfers_to_history(*chain, nft_transfers).await?; - - let nft_block = match NftListStorageOps::get_last_block_number(&storage, chain).await { - Ok(Some(block)) => block, - Ok(None) => { - // if there are no rows in NFT LIST table we can try to get nft list from moralis. - let nft_list = cache_nfts_from_moralis(&ctx, &storage, chain, &req.url, &req.url_antispam).await?; - update_meta_in_transfers(&storage, chain, nft_list).await?; - update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; - update_spam(&storage, *chain, &req.url_antispam).await?; - update_phishing(&storage, chain, &req.url_antispam).await?; - continue; - }, - Err(_) => { - // if there is an error, then NFT LIST table doesnt exist, so we need to cache nft list from moralis. - NftListStorageOps::init(&storage, chain).await?; - let nft_list = cache_nfts_from_moralis(&ctx, &storage, chain, &req.url, &req.url_antispam).await?; - update_meta_in_transfers(&storage, chain, nft_list).await?; - update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; - update_spam(&storage, *chain, &req.url_antispam).await?; - update_phishing(&storage, chain, &req.url_antispam).await?; - continue; - }, + Ok(()) + }) }; - let scanned_block = - storage - .get_last_scanned_block(chain) - .await? - .ok_or_else(|| UpdateNftError::LastScannedBlockNotFound { - last_nft_block: nft_block.to_string(), - })?; - // if both block numbers exist, last scanned block should be equal - // or higher than last block number from NFT LIST table. 
- if scanned_block < nft_block { - return MmError::err(UpdateNftError::InvalidBlockOrder { - last_scanned_block: scanned_block.to_string(), - last_nft_block: nft_block.to_string(), - }); - } - update_nft_list( - ctx.clone(), - &storage, - chain, - scanned_block + 1, - &req.url, - &req.url_antispam, - ) - .await?; - update_nft_global_in_coins_ctx(&ctx, &storage, *chain).await?; - update_transfers_with_empty_meta(&storage, chain, &req.url, &req.url_antispam).await?; - update_spam(&storage, *chain, &req.url_antispam).await?; - update_phishing(&storage, chain, &req.url_antispam).await?; - } + + let future_list = db_ids + .into_iter() + .filter_map(|(id, chains)| { + if !chains.is_empty() { + Some(futures(id, chains)) + } else { + None + } + }) + .collect::>(); + + try_join_all(future_list).await?; + Ok(()) } @@ -533,11 +563,13 @@ fn prepare_uri_for_blocklist_endpoint( /// is identified as spam or matches with any phishing domains, the NFT's `possible_spam` and/or /// `possible_phishing` flags are set to true. pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResult<(), UpdateNftError> { - // TODO: db_id - let db_id: Option = None; - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(GetNftInfoError::Internal)?; + let db_id = find_nft_account_id_for_chain(&ctx, req.chain) + .await + .map_to_mm(UpdateNftError::Internal)? + .map(|(key, _)| key); + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(GetNftInfoError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let storage = nft_ctx.lock_db(db_id.as_deref()).await?; let token_address_str = eth_addr_to_hex(&req.token_address); let moralis_meta = match get_moralis_metadata( token_address_str.clone(), @@ -1542,8 +1574,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN // TODO: db_id let db_id: Option = None; if req.clear_all { - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db(None).await?; storage.clear_all_nft_data().await?; storage.clear_all_history_data().await?; return Ok(()); @@ -1555,8 +1587,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN )); } - let nft_ctx = NftCtx::from_ctx(&ctx, db_id.as_deref()).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db().await?; + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db(db_id.as_deref()).await?; let mut errors = Vec::new(); for chain in req.chains.iter() { if let Err(e) = clear_data_for_chain(&storage, chain).await { @@ -1622,6 +1654,25 @@ pub async fn find_unique_nft_account_ids( } } - common::log::info!("nft account_ids=({active_id_chains:>2?})"); Ok(active_id_chains) } + +pub async fn find_nft_account_id_for_chain(ctx: &MmArc, chains: Chain) -> Result, String> { + let cctx = try_s!(CoinsContext::from_ctx(ctx)); + let coins = cctx.coins.lock().await; + let coins = coins.values().collect::>(); + + for coin in coins.iter() { + if coin.is_available() { + // Use default if no db_id + let db_id = coin.inner.account_db_id().unwrap_or_else(|| ctx.rmd160_hex()); + if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { + if chains == chain { + return Ok(Some((db_id, chain))); + } + } + } + } + + Ok(None) +} diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index 553de5d506..ed34255a79 
100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -85,7 +85,7 @@ pub struct NftMetadataReq { } /// Contains parameters required to refresh metadata for a specified NFT. -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone)] pub struct RefreshMetadataReq { /// The address of the NFT token whose metadata needs to be refreshed. pub(crate) token_address: Address, @@ -664,7 +664,7 @@ pub struct NftTransferHistoryFilters { } /// Contains parameters required to update NFT transfer history and NFT list. -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone)] pub struct UpdateNftReq { /// A list of blockchains for which the NFTs need to be updated. pub(crate) chains: Vec, @@ -719,6 +719,7 @@ impl From for TransferMeta { } } } + #[cfg(not(target_arch = "wasm32"))] pub struct NftCacheDbSql(pub AsyncConnection); @@ -733,8 +734,8 @@ pub(crate) struct NftCtx { pub(crate) nft_cache_db: SharedDb, #[cfg(not(target_arch = "wasm32"))] pub(crate) nft_cache_dbs: Arc>>, - _ctx: MmArc, - _current_db_id: String, + #[cfg(not(target_arch = "wasm32"))] + ctx: MmArc, } impl NftCtx { @@ -742,26 +743,23 @@ impl NftCtx { /// /// If an `NftCtx` instance doesn't already exist in the MM context, it gets created and cached for subsequent use. #[cfg(not(target_arch = "wasm32"))] - pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { let async_sqlite_connection = ctx .async_sqlite_connection .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { nft_cache_dbs: async_sqlite_connection.clone(), - _current_db_id: db_id.map(|d| d.to_string()).unwrap_or_else(|| ctx.rmd160_hex()), - _ctx: ctx.clone(), + ctx: ctx.clone(), }) }))) } #[cfg(target_arch = "wasm32")] - pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { Ok(NftCtx { - nft_cache_db: ConstructibleDb::new(ctx, db_id).into_shared(), - _current_db_id: db_id.map(|d| d.to_string()).unwrap_or_else(|| ctx.rmd160_hex()), - _ctx: ctx.clone(), + nft_cache_db: ConstructibleDb::new(ctx, None).into_shared(), }) }))) } @@ -770,20 +768,22 @@ impl NftCtx { #[cfg(not(target_arch = "wasm32"))] pub(crate) async fn lock_db( &self, + db_id: Option<&str>, ) -> MmResult { + let db_id = db_id.map(|d| d.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); let mut connections = self.nft_cache_dbs.lock().await; - if let Some(async_conn) = connections.get(&self._current_db_id) { + if let Some(async_conn) = connections.get(&db_id) { let conn = NftCacheDbSql(async_conn.lock().await.clone()); Ok(conn) } else { - let sqlite_file_path = self._ctx.dbdir(Some(&self._current_db_id)).join(ASYNC_SQLITE_DB_ID); + let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); log_sqlite_file_open_attempt(&sqlite_file_path); let async_conn = Arc::new(AsyncMutex::new( AsyncConnection::open(sqlite_file_path) .await .map_to_mm(|e| LockDBError::InternalError(e.to_string()))?, )); - connections.insert(self._current_db_id.to_owned(), async_conn.clone()); + connections.insert(db_id, async_conn.clone()); let conn = NftCacheDbSql(async_conn.lock().await.clone()); Ok(conn) @@ -793,9 +793,10 @@ impl NftCtx { #[cfg(target_arch = "wasm32")] pub(crate) async fn lock_db( &self, + db_id: Option<&str>, ) -> MmResult { self.nft_cache_db - .get_or_initialize() + 
.get_or_initialize(db_id) .await .mm_err(WasmNftCacheError::from) .mm_err(LockDBError::from) diff --git a/mm2src/coins/nft/nft_tests.rs b/mm2src/coins/nft/nft_tests.rs index 926932ec3f..d9943d61c4 100644 --- a/mm2src/coins/nft/nft_tests.rs +++ b/mm2src/coins/nft/nft_tests.rs @@ -158,7 +158,7 @@ cross_test!(test_camo, { cross_test!(test_add_get_nfts, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -175,7 +175,7 @@ cross_test!(test_add_get_nfts, { cross_test!(test_last_nft_block, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -190,7 +190,7 @@ cross_test!(test_last_nft_block, { cross_test!(test_nft_list, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -210,7 +210,7 @@ cross_test!(test_nft_list, { cross_test!(test_remove_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -235,7 +235,7 @@ cross_test!(test_remove_nft, { cross_test!(test_nft_amount, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let mut nft = nft(); storage @@ -273,7 +273,7 @@ cross_test!(test_nft_amount, { cross_test!(test_refresh_metadata, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let new_symbol = "NEW_SYMBOL"; let mut nft = nft(); @@ -293,7 +293,7 @@ cross_test!(test_refresh_metadata, { cross_test!(test_update_nft_spam_by_token_address, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -314,7 +314,7 @@ cross_test!(test_update_nft_spam_by_token_address, { cross_test!(test_exclude_nft_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -333,7 +333,7 @@ cross_test!(test_exclude_nft_spam, { 
cross_test!(test_get_animation_external_domains, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -347,7 +347,7 @@ cross_test!(test_get_animation_external_domains, { cross_test!(test_update_nft_phishing_by_domain, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -375,7 +375,7 @@ cross_test!(test_update_nft_phishing_by_domain, { cross_test!(test_exclude_nft_phishing_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft_list = nft_list(); storage.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); @@ -399,7 +399,7 @@ cross_test!(test_exclude_nft_phishing_spam, { cross_test!(test_clear_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft = nft(); storage.add_nfts_to_list(chain, vec![nft], 28056726).await.unwrap(); @@ -411,7 +411,7 @@ cross_test!(test_clear_nft, { cross_test!(test_clear_all_nft, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftListStorageOps::init(&storage, &chain).await.unwrap(); let nft = nft(); storage.add_nfts_to_list(chain, vec![nft], 28056726).await.unwrap(); @@ -441,7 +441,7 @@ async fn test_clear_nft_target(storage: &S, chain: &Chain) cross_test!(test_add_get_transfers, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -468,7 +468,7 @@ cross_test!(test_add_get_transfers, { cross_test!(test_last_transfer_block, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -483,7 +483,7 @@ cross_test!(test_last_transfer_block, { cross_test!(test_transfer_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -502,7 +502,7 @@ cross_test!(test_transfer_history, { cross_test!(test_transfer_history_filters, { let chain = Chain::Bsc; let nft_ctx = 
get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -564,7 +564,7 @@ cross_test!(test_transfer_history_filters, { cross_test!(test_get_update_transfer_meta, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -599,7 +599,7 @@ cross_test!(test_get_update_transfer_meta, { cross_test!(test_update_transfer_spam_by_token_address, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -620,7 +620,7 @@ cross_test!(test_update_transfer_spam_by_token_address, { cross_test!(test_get_token_addresses, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -632,7 +632,7 @@ cross_test!(test_get_token_addresses, { cross_test!(test_exclude_transfer_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -655,7 +655,7 @@ cross_test!(test_exclude_transfer_spam, { cross_test!(test_get_domains, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -669,7 +669,7 @@ cross_test!(test_get_domains, { cross_test!(test_update_transfer_phishing_by_domain, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -697,7 +697,7 @@ cross_test!(test_update_transfer_phishing_by_domain, { cross_test!(test_exclude_transfer_phishing_spam, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -740,7 +740,7 @@ 
cross_test!(test_exclude_transfer_phishing_spam, { cross_test!(test_clear_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); @@ -752,7 +752,7 @@ cross_test!(test_clear_history, { cross_test!(test_clear_all_history, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; - let storage = nft_ctx.lock_db().await.unwrap(); + let storage = nft_ctx.lock_db(None).await.unwrap(); NftTransferHistoryStorageOps::init(&storage, &chain).await.unwrap(); let transfers = nft_transfer_history(); storage.add_transfers_to_history(chain, transfers).await.unwrap(); diff --git a/mm2src/coins/nft/storage/db_test_helpers.rs b/mm2src/coins/nft/storage/db_test_helpers.rs index 75c7b248c2..d59b845661 100644 --- a/mm2src/coins/nft/storage/db_test_helpers.rs +++ b/mm2src/coins/nft/storage/db_test_helpers.rs @@ -358,5 +358,5 @@ pub(crate) async fn get_nft_ctx(_chain: &Chain) -> Arc { let ctx = mm_ctx_with_custom_async_db().await; #[cfg(target_arch = "wasm32")] let ctx = mm_ctx_with_custom_db(); - NftCtx::from_ctx(&ctx, None).unwrap() + NftCtx::from_ctx(&ctx).unwrap() } diff --git a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs index b55b04ad86..4f3158b77d 100644 --- a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs +++ b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs @@ -342,7 +342,7 @@ impl IndexedDbTxHistoryStorage { transactions: Vec::new(), skipped: 0, total: total_count, - }) + }); }, } }, @@ -357,7 +357,7 @@ impl IndexedDbTxHistoryStorage { } async fn lock_db(&self) -> WasmTxHistoryResult> { - self.db.get_or_initialize().await.mm_err(WasmTxHistoryError::from) + self.db.get_or_initialize(None).await.mm_err(WasmTxHistoryError::from) } } diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 3669a5432a..3690fb1aa1 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -146,7 +146,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_add_block_headers_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) + let storage = BlockHeaderStorage::new_from_ctx(&ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -160,7 +160,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_get_block_header_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) + let storage = BlockHeaderStorage::new_from_ctx(&ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -185,7 +185,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_get_last_block_header_with_non_max_bits_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) + let storage = BlockHeaderStorage::new_from_ctx(&ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -220,7 +220,7 @@ mod block_headers_storage_tests { pub(crate) async fn 
test_get_last_block_height_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) + let storage = BlockHeaderStorage::new_from_ctx(&ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -248,7 +248,7 @@ mod block_headers_storage_tests { pub(crate) async fn test_remove_headers_from_storage_impl(for_coin: &str) { let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) + let storage = BlockHeaderStorage::new_from_ctx(&ctx, for_coin.to_string(), None) .unwrap() .into_inner(); storage.init().await.unwrap(); @@ -302,7 +302,7 @@ mod native_tests { fn test_init_collection() { let for_coin = "init_collection"; let ctx = mm_ctx_with_custom_db(); - let storage = BlockHeaderStorage::new_from_ctx(ctx, for_coin.to_string(), None) + let storage = BlockHeaderStorage::new_from_ctx(&ctx, for_coin.to_string(), None) .unwrap() .into_inner(); diff --git a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs index 60fc3e02f4..4e3b1c8307 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs @@ -56,7 +56,7 @@ impl IDBBlockHeadersStorage { async fn lock_db(&self) -> IDBBlockHeadersStorageRes> { self.db - .get_or_initialize() + .get_or_initialize(None) .await .mm_err(|err| BlockHeaderStorageError::init_err(&self.ticker, err.to_string())) } diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs index 1265e5b6b1..cb62e41220 100644 --- a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs @@ -76,7 +76,7 @@ impl BlockDbImpl { async fn lock_db(&self) -> ZcoinStorageRes> { self.db - .get_or_initialize() + .get_or_initialize(None) .await .mm_err(|err| ZcoinStorageError::DbError(err.to_string())) } diff --git a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs index 1811b1e790..3cc4a4c1aa 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs @@ -150,7 +150,7 @@ impl<'a> WalletIndexedDb { pub(crate) async fn lock_db(&self) -> ZcoinStorageRes> { self.db - .get_or_initialize() + .get_or_initialize(None) .await .mm_err(|err| ZcoinStorageError::DbError(err.to_string())) } diff --git a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs index ac6eeada39..a2b379ad0f 100644 --- a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs +++ b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs @@ -75,7 +75,7 @@ impl ZcashParamsWasmImpl { async fn lock_db(&self) -> ZcashParamsWasmRes> { self.0 - .get_or_initialize() + .get_or_initialize(None) .await .mm_err(|err| ZcoinStorageError::DbError(err.to_string())) } diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 481f078ae7..7ae7560d75 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -13,7 +13,8 @@ pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the 
mutable reference. mutex: AsyncMutex>, db_namespace: DbNamespaceId, - db_id: Option, + db_id: AsyncMutex>, + default_db_id: String, } impl ConstructibleDb { @@ -27,7 +28,8 @@ impl ConstructibleDb { ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - db_id: Some(db_id.to_string()), + db_id: AsyncMutex::new(Some(db_id.to_string())), + default_db_id: rmd, } } @@ -40,7 +42,8 @@ impl ConstructibleDb { ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - db_id: Some(db_id.to_string()), + db_id: AsyncMutex::new(Some(db_id.to_string())), + default_db_id: rmd, } } @@ -50,23 +53,33 @@ impl ConstructibleDb { ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, - db_id: None, + db_id: AsyncMutex::new(None), + default_db_id: ctx.rmd160_hex(), } } /// Locks the given mutex and checks if the inner database is initialized already or not, /// initializes it if it's required, and returns the locked instance. - pub async fn get_or_initialize(&self) -> InitDbResult> { + pub async fn get_or_initialize(&self, db_id: Option<&str>) -> InitDbResult> { let mut locked_db = self.mutex.lock().await; - // Db is initialized already - if locked_db.is_some() { - return Ok(unwrap_db_instance(locked_db)); + let mut locked_db_id = self.db_id.lock().await; + + // Check if the database is initialized and if the db_id matches + if let Some(current_db_id) = &*locked_db_id { + if locked_db.is_some() && (db_id.map(|id| id.to_string()) == Some(current_db_id.clone())) { + // If the database is initialized and the db_id matches, return the existing instance + return Ok(unwrap_db_instance(locked_db)); + } } - let db_id = DbIdentifier::new::(self.db_namespace, self.db_id.clone()); + if locked_db.is_some() && db_id.is_none() && Some(self.default_db_id.as_str()) == locked_db_id.as_deref() { + return Ok(unwrap_db_instance(locked_db)); + } - let db = Db::init(db_id).await?; + // Initialize the new DB instance as the db_id is different or no DB was initialized before + let db = Db::init(DbIdentifier::new::(self.db_namespace, locked_db_id.clone())).await?; *locked_db = Some(db); + Ok(unwrap_db_instance(locked_db)) } } diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index d4b55a15b2..5cc2395e71 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -69,7 +69,7 @@ impl WasmAccountStorage { async fn lock_db_mutex(&self) -> AccountStorageResult> { self.account_db - .get_or_initialize() + .get_or_initialize(None) .await .mm_err(AccountStorageError::from) } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 379b30eeca..84706995d5 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2819,7 +2819,8 @@ impl OrdermatchContext { #[cfg(target_arch = "wasm32")] pub async fn ordermatch_db(&self) -> InitDbResult> { - self.ordermatch_db.get_or_initialize().await + // TODO + self.ordermatch_db.get_or_initialize(None).await } } diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index a46b114c8b..a3a69ad4c0 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -566,8 +566,9 @@ impl SwapsContext { /// Removes storage for the swap with specific uuid. 
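Several call sites in these patches resolve the effective database id the same way: take the caller- or coin-supplied account id when it is present, otherwise fall back to the context-wide rmd160 hex that the previous single-database layout used. A minimal sketch of that fallback, with `Ctx` standing in for `MmCtx` (an assumption, not the real API):

    // `Ctx` is a stand-in for MmCtx; only the rmd160-hex accessor matters here.
    struct Ctx {
        rmd160_hex: String,
    }

    // Prefer the explicit per-account id, otherwise use the wallet-wide default,
    // mirroring `db_id.map(|d| d.to_string()).unwrap_or_else(|| ctx.rmd160_hex())`.
    fn effective_db_id(ctx: &Ctx, db_id: Option<&str>) -> String {
        db_id.map(|id| id.to_string()).unwrap_or_else(|| ctx.rmd160_hex.clone())
    }

    fn main() {
        let ctx = Ctx { rmd160_hex: "0123abcd".to_string() };
        assert_eq!(effective_db_id(&ctx, Some("account-1")), "account-1");
        assert_eq!(effective_db_id(&ctx, None), "0123abcd");
    }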
pub fn remove_msg_v2_store(&self, uuid: &Uuid) { self.swap_v2_msgs.lock().unwrap().remove(uuid); } + // TODO #[cfg(target_arch = "wasm32")] - pub async fn swap_db(&self) -> InitDbResult> { self.swap_db.get_or_initialize().await } + pub async fn swap_db(&self) -> InitDbResult> { self.swap_db.get_or_initialize(None).await } } #[derive(Debug, Deserialize)] diff --git a/mm2src/mm2_main/src/lp_wallet.rs b/mm2src/mm2_main/src/lp_wallet.rs index e84ea8e98c..db282bb0c5 100644 --- a/mm2src/mm2_main/src/lp_wallet.rs +++ b/mm2src/mm2_main/src/lp_wallet.rs @@ -96,7 +96,10 @@ impl WalletsContext { }))) } - pub async fn wallets_db(&self) -> InitDbResult> { self.wallets_db.get_or_initialize().await } + // TODO + pub async fn wallets_db(&self) -> InitDbResult> { + self.wallets_db.get_or_initialize(None).await + } } // Utility function for deserialization to reduce repetition From acd6383167043fb4879f7a2308880a162b73b21b Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 23 Apr 2024 08:59:01 +0100 Subject: [PATCH 065/186] wip changes db_id --- mm2src/coins/lp_coins.rs | 10 ++++--- mm2src/coins/my_tx_history_v2.rs | 4 +-- mm2src/coins/tx_history_storage/mod.rs | 9 ++++-- .../sql_tx_history_storage_v2.rs | 5 ++-- .../tx_history_storage/tx_history_v2_tests.rs | 28 +++++++++---------- .../wasm/tx_history_storage_v2.rs | 2 +- .../utxo/utxo_block_header_storage/mod.rs | 2 +- .../wasm/indexeddb_block_header_storage.rs | 2 ++ mm2src/coins/utxo/utxo_common_tests.rs | 4 +-- .../storage/blockdb/blockdb_idb_storage.rs | 3 +- .../storage/blockdb/blockdb_sql_storage.rs | 18 +++++++++--- mm2src/coins/z_coin/storage/blockdb/mod.rs | 7 +++-- .../z_coin/storage/walletdb/wasm/storage.rs | 4 ++- .../src/platform_coin_with_tokens.rs | 2 +- .../standalone_coin/init_standalone_coin.rs | 13 +++++++-- mm2src/mm2_db/src/indexed_db/db_lock.rs | 6 +++- 16 files changed, 77 insertions(+), 42 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 5e9435aed7..0253242188 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3620,8 +3620,8 @@ impl CoinsContext { } #[cfg(target_arch = "wasm32")] - async fn tx_history_db(&self) -> TxHistoryResult> { - Ok(self.tx_history_db.get_or_initialize(None).await?) + async fn tx_history_db(&self, db_id: Option<&str>) -> TxHistoryResult> { + Ok(self.tx_history_db.get_or_initialize(db_id).await?) 
} } @@ -4739,10 +4739,11 @@ where let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); let my_address = try_f!(coin.my_address()); + let db_id = coin.account_db_id(); let fut = async move { let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); - let db = coins_ctx.tx_history_db().await?; + let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; let err = match load_tx_history(&db, &ticker, &my_address).await { Ok(history) => return Ok(history), Err(e) => e, @@ -4813,12 +4814,13 @@ where let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); let my_address = try_f!(coin.my_address()); + let db_id = coin.account_db_id(); history.sort_unstable_by(compare_transaction_details); let fut = async move { let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); - let db = coins_ctx.tx_history_db().await?; + let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; save_tx_history(&db, &ticker, &my_address, history).await?; Ok(()) }; diff --git a/mm2src/coins/my_tx_history_v2.rs b/mm2src/coins/my_tx_history_v2.rs index 00f25b2701..2ca023e732 100644 --- a/mm2src/coins/my_tx_history_v2.rs +++ b/mm2src/coins/my_tx_history_v2.rs @@ -401,7 +401,7 @@ pub(crate) async fn my_tx_history_v2_impl( where Coin: CoinWithTxHistoryV2 + MmCoin, { - let tx_history_storage = TxHistoryStorageBuilder::new(&ctx).build()?; + let tx_history_storage = TxHistoryStorageBuilder::new(&ctx, coin.account_db_id()).build()?; let wallet_id = coin.history_wallet_id(); let is_storage_init = tx_history_storage.is_initialized_for(&wallet_id).await?; @@ -528,7 +528,7 @@ pub(crate) mod for_tests { pub fn init_storage_for(coin: &Coin) -> (MmArc, impl TxHistoryStorage) { let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); block_on(storage.init(&coin.history_wallet_id())).unwrap(); (ctx, storage) } diff --git a/mm2src/coins/tx_history_storage/mod.rs b/mm2src/coins/tx_history_storage/mod.rs index 1f0ca4f8d8..d32bd183d8 100644 --- a/mm2src/coins/tx_history_storage/mod.rs +++ b/mm2src/coins/tx_history_storage/mod.rs @@ -45,18 +45,21 @@ pub enum CreateTxHistoryStorageError { /// `TxHistoryStorageBuilder` is used to create an instance that implements the `TxHistoryStorage` trait. 
pub struct TxHistoryStorageBuilder<'a> { ctx: &'a MmArc, + db_id: Option, } impl<'a> TxHistoryStorageBuilder<'a> { #[inline] - pub fn new(ctx: &MmArc) -> TxHistoryStorageBuilder<'_> { TxHistoryStorageBuilder { ctx } } + pub fn new(ctx: &MmArc, db_id: Option) -> TxHistoryStorageBuilder<'_> { + TxHistoryStorageBuilder { ctx, db_id } + } #[inline] pub fn build(self) -> MmResult { #[cfg(target_arch = "wasm32")] - return wasm::IndexedDbTxHistoryStorage::new(self.ctx); + return wasm::IndexedDbTxHistoryStorage::new(self.ctx, self.db_id.as_deref()); #[cfg(not(target_arch = "wasm32"))] - sql_tx_history_storage_v2::SqliteTxHistoryStorage::new(self.ctx) + sql_tx_history_storage_v2::SqliteTxHistoryStorage::new(self.ctx, self.db_id.as_deref()) } } diff --git a/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs b/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs index d7870edab6..fd2f405d56 100644 --- a/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs +++ b/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs @@ -375,9 +375,8 @@ impl WalletId { pub struct SqliteTxHistoryStorage(Arc>); impl SqliteTxHistoryStorage { - pub fn new(ctx: &MmArc) -> Result> { - // TODO db_id - let sqlite_connection = ctx.sqlite_connection_res(None).map_to_mm(|_| { + pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Result> { + let sqlite_connection = ctx.sqlite_connection_res(db_id).map_to_mm(|_| { CreateTxHistoryStorageError::Internal("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) })?; diff --git a/mm2src/coins/tx_history_storage/tx_history_v2_tests.rs b/mm2src/coins/tx_history_storage/tx_history_v2_tests.rs index ab4a2a7e85..afeae1b01d 100644 --- a/mm2src/coins/tx_history_storage/tx_history_v2_tests.rs +++ b/mm2src/coins/tx_history_storage/tx_history_v2_tests.rs @@ -43,7 +43,7 @@ async fn test_add_transactions_impl() { let wallet_id = wallet_id_for_test("TEST_ADD_TRANSACTIONS"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -84,7 +84,7 @@ async fn test_remove_transaction_impl() { let wallet_id = wallet_id_for_test("TEST_REMOVE_TRANSACTION"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); let tx_details = get_bch_tx_details("6686ee013620d31ba645b27d581fed85437ce00f46b595a576718afac4dd5b69"); @@ -116,7 +116,7 @@ async fn test_get_transaction_impl() { let wallet_id = wallet_id_for_test("TEST_GET_TRANSACTION"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -158,7 +158,7 @@ async fn test_update_transaction_impl() { let wallet_id = wallet_id_for_test("TEST_UPDATE_TRANSACTION"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -185,7 +185,7 @@ async fn test_contains_and_get_unconfirmed_transaction_impl() { let wallet_id = wallet_id_for_test("TEST_CONTAINS_AND_GET_UNCONFIRMED_TRANSACTION"); let ctx = mm_ctx_with_custom_db(); - let storage = 
TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -274,7 +274,7 @@ async fn test_has_transactions_with_hash_impl() { let wallet_id = wallet_id_for_test("TEST_HAS_TRANSACTIONS_WITH_HASH"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -307,7 +307,7 @@ async fn test_unique_tx_hashes_num_impl() { let wallet_id = wallet_id_for_test("TEST_UNIQUE_TX_HASHES_NUM"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -355,7 +355,7 @@ async fn test_add_and_get_tx_from_cache_impl() { .with_hd_wallet_rmd160("108f07b8382412612c048d07d13f814118445acd".into()); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id_1).await.unwrap(); storage.init(&wallet_id_2).await.unwrap(); @@ -387,7 +387,7 @@ async fn test_get_raw_tx_bytes_on_add_transactions_impl() { let wallet_id = wallet_id_for_test("TEST_GET_RAW_TX_BYTES_ON_ADD_TRANSACTIONS"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -418,7 +418,7 @@ async fn test_get_history_page_number_impl() { let wallet_id = wallet_id_for_test("TEST_GET_HISTORY_PAGE_NUMBER"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -460,7 +460,7 @@ async fn test_get_history_from_id_impl() { let wallet_id = wallet_id_for_test("TEST_GET_HISTORY_FROM_ID"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -502,7 +502,7 @@ async fn test_get_history_for_addresses_impl() { let wallet_id = wallet_id_for_test("TEST_GET_HISTORY_FROM_ID"); let ctx = mm_ctx_with_custom_db(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); storage.init(&wallet_id).await.unwrap(); @@ -591,7 +591,7 @@ mod native_tests { let wallet_id = wallet_id_for_test("TEST_INIT_COLLECTION"); let ctx = mm_ctx_with_custom_db(); - let storage = SqliteTxHistoryStorage::new(&ctx).unwrap(); + let storage = SqliteTxHistoryStorage::new(&ctx, None).unwrap(); let initialized = block_on(storage.is_initialized_for(&wallet_id)).unwrap(); assert!(!initialized); @@ -660,7 +660,7 @@ mod wasm_tests { let wallet_id = wallet_id_for_test("TEST_INIT_COLLECTION"); let ctx = mm_ctx_with_custom_db(); - let storage = IndexedDbTxHistoryStorage::new(&ctx).unwrap(); + let storage = IndexedDbTxHistoryStorage::new(&ctx, None).unwrap(); // Please note this is the `IndexedDbTxHistoryStorage` specific: // [`IndexedDbTxHistoryStorage::is_initialized_for`] always returns `true`. 
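The tx-history changes above thread an optional account id from `coin.account_db_id()` into `TxHistoryStorageBuilder`, which hands it to the concrete backend (`SqliteTxHistoryStorage` natively, `IndexedDbTxHistoryStorage` in wasm); passing `None` keeps the old single-database behaviour, which is what all the updated tests do. A rough sketch of that shape, where `Ctx`, `Coin` and the builder internals are simplified placeholders rather than the real types:

    struct Ctx {
        default_db_id: String,
    }

    struct Coin {
        account_db_id: Option<String>,
    }

    struct TxHistoryStorageBuilder<'a> {
        ctx: &'a Ctx,
        db_id: Option<String>,
    }

    impl<'a> TxHistoryStorageBuilder<'a> {
        fn new(ctx: &'a Ctx, db_id: Option<String>) -> Self {
            TxHistoryStorageBuilder { ctx, db_id }
        }

        // The real `build` opens a SQLite connection or IndexedDB for the resolved id;
        // here it only reports which account database would be used.
        fn build(self) -> String {
            self.db_id.unwrap_or_else(|| self.ctx.default_db_id.clone())
        }
    }

    fn main() {
        let ctx = Ctx { default_db_id: "default-rmd160".to_string() };
        let hd_coin = Coin { account_db_id: Some("account-1".to_string()) };
        let legacy_coin = Coin { account_db_id: None };
        // Mirrors `TxHistoryStorageBuilder::new(&ctx, coin.account_db_id()).build()`.
        assert_eq!(TxHistoryStorageBuilder::new(&ctx, hd_coin.account_db_id).build(), "account-1");
        assert_eq!(TxHistoryStorageBuilder::new(&ctx, legacy_coin.account_db_id).build(), "default-rmd160");
    }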
diff --git a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs index 4f3158b77d..dbdc6fe89c 100644 --- a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs +++ b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs @@ -27,7 +27,7 @@ pub struct IndexedDbTxHistoryStorage { } impl IndexedDbTxHistoryStorage { - pub fn new(ctx: &MmArc) -> MmResult + pub fn new(ctx: &MmArc, _db_id: Option<&str>) -> MmResult where Self: Sized, { diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 3690fb1aa1..0ba262948b 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -52,7 +52,7 @@ impl BlockHeaderStorage { db_id: Option<&str>, ) -> Result { Ok(BlockHeaderStorage { - inner: Box::new(IDBBlockHeadersStorage::new(&ctx, ticker, db_id)), + inner: Box::new(IDBBlockHeadersStorage::new(ctx, ticker, db_id)), }) } diff --git a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs index 4e3b1c8307..18c8641223 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs @@ -44,6 +44,7 @@ impl IDBBlockHeadersInner { pub struct IDBBlockHeadersStorage { pub db: SharedDb, pub ticker: String, + pub db_id: Option, } impl IDBBlockHeadersStorage { @@ -51,6 +52,7 @@ impl IDBBlockHeadersStorage { Self { db: ConstructibleDb::new(ctx, db_id).into_shared(), ticker, + db_id: db_id.map(|e| e.to_string()), } } diff --git a/mm2src/coins/utxo/utxo_common_tests.rs b/mm2src/coins/utxo/utxo_common_tests.rs index 0e5254cfea..6404eae723 100644 --- a/mm2src/coins/utxo/utxo_common_tests.rs +++ b/mm2src/coins/utxo/utxo_common_tests.rs @@ -286,7 +286,7 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { let coin = utxo_coin_from_fields(fields); let current_balances = coin.my_addresses_balances().await.unwrap(); - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); spawn(utxo_history_loop( coin.clone(), storage, @@ -311,7 +311,7 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { _ => unimplemented!(), } - let storage = TxHistoryStorageBuilder::new(&ctx).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); spawn(utxo_history_loop( coin.clone(), storage, diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs index cb62e41220..56a57fc9fd 100644 --- a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs @@ -71,12 +71,13 @@ impl BlockDbImpl { Ok(Self { db: ConstructibleDb::new(ctx, db_id).into_shared(), ticker, + db_id: db_id.map(|e| e.to_string()), }) } async fn lock_db(&self) -> ZcoinStorageRes> { self.db - .get_or_initialize(None) + .get_or_initialize(self.db_id.as_deref()) .await .mm_err(|err| ZcoinStorageError::DbError(err.to_string())) } diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs index 4338527f38..07bccdeae5 100644 --- 
a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs @@ -45,7 +45,8 @@ impl From> for ZcoinStorageError { impl BlockDbImpl { #[cfg(all(not(test)))] - pub async fn new(_ctx: &MmArc, ticker: String, path: PathBuf, _db_id: Option<&str>) -> ZcoinStorageRes { + pub async fn new(_ctx: &MmArc, ticker: String, path: PathBuf, db_id: Option<&str>) -> ZcoinStorageRes { + let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { let conn = Connection::open(path).map_to_mm(|err| ZcoinStorageError::DbError(err.to_string()))?; let conn = Arc::new(Mutex::new(conn)); @@ -62,7 +63,11 @@ impl BlockDbImpl { ) .map_to_mm(|err| ZcoinStorageError::DbError(err.to_string()))?; - Ok(Self { db: conn, ticker }) + Ok(Self { + db: conn, + ticker, + db_id, + }) }) .await } @@ -72,8 +77,9 @@ impl BlockDbImpl { _ctx: &MmArc, ticker: String, _path: PathBuf, - _db_id: Option<&str>, + db_id: Option<&str>, ) -> ZcoinStorageRes { + let db_id = db_id.map(|e| e.to_string()); let conn = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let conn_clone = conn.clone(); async_blocking(move || { @@ -89,7 +95,11 @@ impl BlockDbImpl { ) .map_to_mm(|err| ZcoinStorageError::DbError(err.to_string()))?; - Ok(BlockDbImpl { db: conn, ticker }) + Ok(BlockDbImpl { + db: conn, + ticker, + db_id, + }) }) .await } diff --git a/mm2src/coins/z_coin/storage/blockdb/mod.rs b/mm2src/coins/z_coin/storage/blockdb/mod.rs index e79043d207..1ad6eeca56 100644 --- a/mm2src/coins/z_coin/storage/blockdb/mod.rs +++ b/mm2src/coins/z_coin/storage/blockdb/mod.rs @@ -7,11 +7,13 @@ use db_common::sqlite::rusqlite::Connection; #[cfg(target_arch = "wasm32")] pub(crate) mod blockdb_idb_storage; + #[cfg(target_arch = "wasm32")] use blockdb_idb_storage::BlockDbInner; #[cfg(target_arch = "wasm32")] use mm2_db::indexed_db::SharedDb; /// A wrapper for the db connection to the block cache database in native and browser. 
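The storage wrappers touched by this patch (`IDBBlockHeadersStorage`, `BlockDbImpl`, and `WalletIndexedDb` further below) all follow the same pattern: remember the account id they were constructed with and forward it to `get_or_initialize` on every `lock_db` call instead of hard-coding `None`. A toy model of that pattern, assuming the `futures` crate for `block_on`; `SharedDb` here is a stand-in, not the real `ConstructibleDb` API:

    // Stand-in for the shared, lazily initialized database handle.
    struct SharedDb {
        namespace: String,
    }

    impl SharedDb {
        async fn get_or_initialize(&self, db_id: Option<&str>) -> String {
            format!("{}::{}", self.namespace, db_id.unwrap_or("default"))
        }
    }

    struct BlockDb {
        db: SharedDb,
        db_id: Option<String>,
    }

    impl BlockDb {
        fn new(ticker: &str, db_id: Option<&str>) -> Self {
            BlockDb {
                db: SharedDb { namespace: format!("block_db_{ticker}") },
                db_id: db_id.map(|id| id.to_string()),
            }
        }

        // Mirrors the updated `lock_db`: the stored account id is threaded through,
        // so each account ends up with its own database instance.
        async fn lock_db(&self) -> String {
            self.db.get_or_initialize(self.db_id.as_deref()).await
        }
    }

    fn main() {
        let db = BlockDb::new("ARRR", Some("account-1"));
        assert_eq!(futures::executor::block_on(db.lock_db()), "block_db_ARRR::account-1");
    }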
+#[allow(unused)] #[derive(Clone)] pub struct BlockDbImpl { #[cfg(not(target_arch = "wasm32"))] @@ -20,6 +22,7 @@ pub struct BlockDbImpl { pub db: SharedDb, #[allow(unused)] ticker: String, + db_id: Option, } #[cfg(any(test, target_arch = "wasm32"))] @@ -32,8 +35,8 @@ mod block_db_storage_tests { const TICKER: &str = "ARRR"; const HEADERS: &[(u32, &str)] = &[(1900000, - "10E0FB731A2044797F3BB78323A7717007F1E289A3689E0B5B3433385DBD8E6F6A17000000002220735484676853C744A8CA0FEA105081C54A8C50A151E42E31EC7E20040000000028EBACFD9306"), (1900001, - "10E1FB731A20A261B624D0E42238255A69F96E45EEA341B5E4125A7DD710118D150B00000000222044797F3BB78323A7717007F1E289A3689E0B5B3433385DBD8E6F6A170000000028FEACFD9306"), (1900002,"10E2FB731A208747587DE8DDED766591FA6C9859D77BFC9C293B054F3D38A9BC5E08000000002220A261B624D0E42238255A69F96E45EEA341B5E4125A7DD710118D150B0000000028F7ADFD93063AC002080212201D7165BCACD3245EED7324367EB34199EA2ED502726933484FEFA6A220AA330F22220A208DD3C9362FBCF766BEF2DFA3A3B186BBB43CA456DB9690EFD06978FC822056D22A7A0A20245E73ED6EB4B73805D3929F841CCD7E01523E2B8A0F29D721CD82547A470C711220D6BAF6AF4783FF265451B8A7A5E4271EA72F034890DA234427082F84F08256DD1A34EAABEE115A1FCDED194189F586C6DC2099E8C5F47BD68B210146EDFFCB39649EB55504910EC590E6E9908B6114ED3DDFD5861FDC2A7A0A2079E70D202FEE537011284A30F1531BCF627613CBBAAFABBB24CE56600FE94B6C122041E9FBA0E6197A58532F61BD7617CACEC8C2F10C77AA8B99B2E535EE1D3C36171A341B6A04C5EC9A2AE8CDF0433C9AAD36C647139C9542759E2758FD4A10ED0C78F8087BE5AEE92EA8834E6CE116C8A5737B7607BD523AC002080312202790606A461DA171221480A3FC414CCF9C273FE6F0C2E3CFA6C85D6CDE8EFE5C22220A201767E6E3B390FAB4C79E46131C54ED91A987EEA2286DB80F240D431AC07A750C2A7A0A20E86C11A660EB72F1449BA0CEB57FFB313A4047880C33ADED93945ED9C477581B12201752816751ABAB19398A4A5CFE429724D820588BCFEDC7D88B399D9B24FB4C111A34DB38AE57231FBE768063E08D8EC70E3486FF89A74E0840B6F5D8412F1C7E2C5D884AA08E2F7EDA42836B80B4433C83CDDC8B51DE2A7A0A20E2FEF897A286A8D5AD9E0485F287CE1A73970EADA899DBE3FC77043846E06B1E1220F0A046829B17CC8B5B750281CD20A1E28F983E599AA2A1C8F3BD97BE49C55CEB1A3488DCDA1444CBACE213100507FC83627D83624EF2AD47C25160F5E604595158C98EBC3549C0A07359FB42D8437A70AB472FB64AA13AC002080412201EDD399E68128B97F6F98E31C1965361528AC07665114D09F9D119C089791E9222220A20B9471453950609CF8C2EDF721FE7D0D2D211BBD158283E8D6B80EAAB312968EF2A7A0A201FF6F7D74ABBAC9D4E5A95F63861C19FE3D18083ABE2EACE7B8A70E7E5FCB51812206753F2992061EF3FC0C37FC0D1352A386514B2CC1AEB39AC835A8D9BFBD022D91A34BA41719ECF19520BD7D6EFB08AAF5018282026781D0FE5697811B34E0DEFE4D4691585D4994056E109DC19FFE63CAB29CA4F26682A7A0A200E570E832326625C9D8536DBAC389529A090FC54C3F378E25431405751BBFF391220D27A030843C93522B2D232644E7AC7CF235494B126FDAEA9F5980FA1AECE746E1A34EF8BD98D7DD39659714E7851E47F57A52741F564F0275CE8A82F2665C70EA5887B0CE8501CF509A8265ECB155A00A0629B463C253AC00208051220E1F375AD9EC6A774E444ECC5EB6F07237B1DE9EAA1A9FD7AEF392D6F40BA705822220A20D8298A06C9657E042DC69473B23A74C94E51AF684DA6281CE7F797791F486AD42A7A0A209216A5DBC616291688CDFB075A5E639FA8000ADD006438C4BCE98D000AE0DF3512202C20533A17279C46EC995DBF819673039E5810DCD2DA024DAEF64053CD7B562D1A346928F93BB25B03519AC83B297F77E2F54F62B1E722E6F8D886ADF709455C2C0B930CE429EA24ECD15354085F7FA3F2A4077DE76D2A7A0A203AE3F07AB8AB4C76B246A0D7CA9321F84081144E9B7E3AE0CEC0139B392E443812200791064E9E188BF1D1373BEEFAE7458F12F976B15896CD69970019B4560A5F721A3428ADC7816F15528F65372E585E07D1CD6C0DFB3F3BA7BD263BB4E5A3ADAAFD84CD55FFBDD23787163F52711A22935EB52A30EB37") + 
"10E0FB731A2044797F3BB78323A7717007F1E289A3689E0B5B3433385DBD8E6F6A17000000002220735484676853C744A8CA0FEA105081C54A8C50A151E42E31EC7E20040000000028EBACFD9306"), (1900001, + "10E1FB731A20A261B624D0E42238255A69F96E45EEA341B5E4125A7DD710118D150B00000000222044797F3BB78323A7717007F1E289A3689E0B5B3433385DBD8E6F6A170000000028FEACFD9306"), (1900002, "10E2FB731A208747587DE8DDED766591FA6C9859D77BFC9C293B054F3D38A9BC5E08000000002220A261B624D0E42238255A69F96E45EEA341B5E4125A7DD710118D150B0000000028F7ADFD93063AC002080212201D7165BCACD3245EED7324367EB34199EA2ED502726933484FEFA6A220AA330F22220A208DD3C9362FBCF766BEF2DFA3A3B186BBB43CA456DB9690EFD06978FC822056D22A7A0A20245E73ED6EB4B73805D3929F841CCD7E01523E2B8A0F29D721CD82547A470C711220D6BAF6AF4783FF265451B8A7A5E4271EA72F034890DA234427082F84F08256DD1A34EAABEE115A1FCDED194189F586C6DC2099E8C5F47BD68B210146EDFFCB39649EB55504910EC590E6E9908B6114ED3DDFD5861FDC2A7A0A2079E70D202FEE537011284A30F1531BCF627613CBBAAFABBB24CE56600FE94B6C122041E9FBA0E6197A58532F61BD7617CACEC8C2F10C77AA8B99B2E535EE1D3C36171A341B6A04C5EC9A2AE8CDF0433C9AAD36C647139C9542759E2758FD4A10ED0C78F8087BE5AEE92EA8834E6CE116C8A5737B7607BD523AC002080312202790606A461DA171221480A3FC414CCF9C273FE6F0C2E3CFA6C85D6CDE8EFE5C22220A201767E6E3B390FAB4C79E46131C54ED91A987EEA2286DB80F240D431AC07A750C2A7A0A20E86C11A660EB72F1449BA0CEB57FFB313A4047880C33ADED93945ED9C477581B12201752816751ABAB19398A4A5CFE429724D820588BCFEDC7D88B399D9B24FB4C111A34DB38AE57231FBE768063E08D8EC70E3486FF89A74E0840B6F5D8412F1C7E2C5D884AA08E2F7EDA42836B80B4433C83CDDC8B51DE2A7A0A20E2FEF897A286A8D5AD9E0485F287CE1A73970EADA899DBE3FC77043846E06B1E1220F0A046829B17CC8B5B750281CD20A1E28F983E599AA2A1C8F3BD97BE49C55CEB1A3488DCDA1444CBACE213100507FC83627D83624EF2AD47C25160F5E604595158C98EBC3549C0A07359FB42D8437A70AB472FB64AA13AC002080412201EDD399E68128B97F6F98E31C1965361528AC07665114D09F9D119C089791E9222220A20B9471453950609CF8C2EDF721FE7D0D2D211BBD158283E8D6B80EAAB312968EF2A7A0A201FF6F7D74ABBAC9D4E5A95F63861C19FE3D18083ABE2EACE7B8A70E7E5FCB51812206753F2992061EF3FC0C37FC0D1352A386514B2CC1AEB39AC835A8D9BFBD022D91A34BA41719ECF19520BD7D6EFB08AAF5018282026781D0FE5697811B34E0DEFE4D4691585D4994056E109DC19FFE63CAB29CA4F26682A7A0A200E570E832326625C9D8536DBAC389529A090FC54C3F378E25431405751BBFF391220D27A030843C93522B2D232644E7AC7CF235494B126FDAEA9F5980FA1AECE746E1A34EF8BD98D7DD39659714E7851E47F57A52741F564F0275CE8A82F2665C70EA5887B0CE8501CF509A8265ECB155A00A0629B463C253AC00208051220E1F375AD9EC6A774E444ECC5EB6F07237B1DE9EAA1A9FD7AEF392D6F40BA705822220A20D8298A06C9657E042DC69473B23A74C94E51AF684DA6281CE7F797791F486AD42A7A0A209216A5DBC616291688CDFB075A5E639FA8000ADD006438C4BCE98D000AE0DF3512202C20533A17279C46EC995DBF819673039E5810DCD2DA024DAEF64053CD7B562D1A346928F93BB25B03519AC83B297F77E2F54F62B1E722E6F8D886ADF709455C2C0B930CE429EA24ECD15354085F7FA3F2A4077DE76D2A7A0A203AE3F07AB8AB4C76B246A0D7CA9321F84081144E9B7E3AE0CEC0139B392E443812200791064E9E188BF1D1373BEEFAE7458F12F976B15896CD69970019B4560A5F721A3428ADC7816F15528F65372E585E07D1CD6C0DFB3F3BA7BD263BB4E5A3ADAAFD84CD55FFBDD23787163F52711A22935EB52A30EB37") ]; pub(crate) async fn test_insert_block_and_get_latest_block_impl() { diff --git a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs index 3cc4a4c1aa..d5a697444e 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs @@ -130,6 +130,7 @@ pub struct WalletIndexedDb { pub db: SharedDb, pub ticker: String, pub params: ZcoinConsensusParams, 
+ pub db_id: Option, } impl<'a> WalletIndexedDb { @@ -143,6 +144,7 @@ impl<'a> WalletIndexedDb { db: ConstructibleDb::new(ctx, db_id).into_shared(), ticker: ticker.to_string(), params: consensus_params, + db_id: db_id.map(|e| e.to_string()), }; Ok(db) @@ -150,7 +152,7 @@ impl<'a> WalletIndexedDb { pub(crate) async fn lock_db(&self) -> ZcoinStorageRes> { self.db - .get_or_initialize(None) + .get_or_initialize(self.db_id.as_deref()) .await .mm_err(|err| ZcoinStorageError::DbError(err.to_string())) } diff --git a/mm2src/coins_activation/src/platform_coin_with_tokens.rs b/mm2src/coins_activation/src/platform_coin_with_tokens.rs index c20dccf302..68c3b7318f 100644 --- a/mm2src/coins_activation/src/platform_coin_with_tokens.rs +++ b/mm2src/coins_activation/src/platform_coin_with_tokens.rs @@ -383,7 +383,7 @@ where if req.request.tx_history() { platform_coin.start_history_background_fetching( ctx.clone(), - TxHistoryStorageBuilder::new(&ctx).build()?, + TxHistoryStorageBuilder::new(&ctx, platform_coin.account_db_id()).build()?, activation_result.get_platform_balance(), ); } diff --git a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs index 314f3066b4..4d75b3fa00 100644 --- a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs +++ b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs @@ -205,16 +205,25 @@ where .await?; let result = coin - .get_activation_result(self.ctx.clone(), task_handle, &self.request.activation_params) + .get_activation_result(self.ctx.clone(), task_handle.clone(), &self.request.activation_params) .await?; log::info!("{} current block {}", ticker, result.current_block()); let tx_history = self.request.activation_params.tx_history(); if tx_history { let current_balances = result.get_addresses_balances(); + let coin_clone = Standalone::init_standalone_coin( + self.ctx.clone(), + ticker.clone(), + self.coin_conf.clone(), + &self.request.activation_params, + self.protocol_info.clone(), + task_handle.clone(), + ) + .await?; coin.start_history_background_fetching( self.ctx.metrics.clone(), - TxHistoryStorageBuilder::new(&self.ctx).build()?, + TxHistoryStorageBuilder::new(&self.ctx, coin_clone.into().account_db_id()).build()?, current_balances, ); } diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 7ae7560d75..49c243effb 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -62,7 +62,7 @@ impl ConstructibleDb { /// initializes it if it's required, and returns the locked instance. pub async fn get_or_initialize(&self, db_id: Option<&str>) -> InitDbResult> { let mut locked_db = self.mutex.lock().await; - let mut locked_db_id = self.db_id.lock().await; + let locked_db_id = self.db_id.lock().await; // Check if the database is initialized and if the db_id matches if let Some(current_db_id) = &*locked_db_id { @@ -72,6 +72,10 @@ impl ConstructibleDb { } } + // Check if there is already an initialized database instance (`locked_db`) + // and if no specific db_id is provided. It then verifies whether + // the current db_id matches the default default_db_id. + // If these conditions are met, the function returns the existing database instance. 
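The comment block above, together with the check that follows, is the heart of the new caching rule: `get_or_initialize(db_id)` reuses the cached instance only when it was initialized for the requested id (or for the wallet-wide default when no id is passed), and re-initializes otherwise. A compact synchronous model of that rule, with toy types, no async mutexes, and a `String` in place of the real database handle:

    struct ConstructibleDb {
        cached: Option<(String, String)>, // (db_id it was initialized for, "instance")
        default_db_id: String,
    }

    impl ConstructibleDb {
        fn new(default_db_id: &str) -> Self {
            ConstructibleDb { cached: None, default_db_id: default_db_id.to_string() }
        }

        fn get_or_initialize(&mut self, db_id: Option<&str>) -> &str {
            // Resolve the requested id: explicit account id, or the wallet-wide default.
            let wanted = db_id.unwrap_or(&self.default_db_id).to_string();

            // Reuse the cached instance only when it was initialized for the same id.
            let reuse = matches!(&self.cached, Some((id, _)) if *id == wanted);
            if !reuse {
                // First call, or a different account: initialize a fresh instance.
                self.cached = Some((wanted.clone(), format!("db for {wanted}")));
            }
            &self.cached.as_ref().unwrap().1
        }
    }

    fn main() {
        let mut db = ConstructibleDb::new("default-rmd160");
        assert_eq!(db.get_or_initialize(None), "db for default-rmd160");
        assert_eq!(db.get_or_initialize(Some("account-1")), "db for account-1");
        // Asking for the default again swaps the cached instance back.
        assert_eq!(db.get_or_initialize(None), "db for default-rmd160");
    }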
if locked_db.is_some() && db_id.is_none() && Some(self.default_db_id.as_str()) == locked_db_id.as_deref() { return Ok(unwrap_db_instance(locked_db)); } From 4d04128438c958dfd59dde2723f825beb4b81336 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 23 Apr 2024 09:05:08 +0100 Subject: [PATCH 066/186] organize lp_coin mods/imports --- mm2src/coins/lp_coins.rs | 88 +++++++++++++++------------------------- 1 file changed, 32 insertions(+), 56 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 0253242188..390b73e27f 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -204,62 +204,22 @@ macro_rules! ok_or_continue_after_sleep { } pub mod coin_balance; -pub mod lp_price; -pub mod watcher_common; - pub mod coin_errors; - -use coin_errors::{MyAddressError, ValidatePaymentError, ValidatePaymentFut}; - #[doc(hidden)] #[cfg(test)] pub mod coins_tests; - pub mod eth; - -use eth::GetValidEthWithdrawAddError; -use eth::{eth_coin_from_conf_and_request, get_eth_address, EthCoin, EthGasDetailsErr, EthTxFeeDetails, - GetEthAddressError, SignedEthTx}; -use ethereum_types::U256; - pub mod hd_confirm_address; pub mod hd_pubkey; - pub mod hd_wallet; - -use hd_wallet::{HDAccountAddressId, HDAddress}; - pub mod hd_wallet_storage; #[cfg(not(target_arch = "wasm32"))] pub mod lightning; +pub mod lp_price; #[cfg_attr(target_arch = "wasm32", allow(dead_code, unused_imports))] pub mod my_tx_history_v2; - +pub mod nft; pub mod qrc20; - -use qrc20::{qrc20_coin_with_policy, Qrc20ActivationParams, Qrc20Coin, Qrc20FeeDetails}; - pub mod rpc_command; - -use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskManagerShared}, - init_account_balance::{AccountBalanceTaskManager, AccountBalanceTaskManagerShared}, - init_create_account::{CreateAccountTaskManager, CreateAccountTaskManagerShared}, - init_scan_for_new_addresses::{ScanAddressesTaskManager, ScanAddressesTaskManagerShared}, - init_withdraw::{WithdrawTaskManager, WithdrawTaskManagerShared}}; - -pub mod tendermint; - -use tendermint::htlc::CustomTendermintMsgType; -use tendermint::{CosmosTransaction, TendermintCoin, TendermintFeeDetails, TendermintProtocolInfo, TendermintToken, - TendermintTokenProtocolInfo}; - -#[doc(hidden)] -#[allow(unused_variables)] -pub mod test_coin; - -pub use test_coin::TestCoin; - -pub mod tx_history_storage; - #[doc(hidden)] #[allow(unused_variables)] #[cfg(all( @@ -269,7 +229,33 @@ pub mod tx_history_storage; not(target_arch = "wasm32") ))] pub mod solana; +pub mod tendermint; +#[doc(hidden)] +#[allow(unused_variables)] +pub mod test_coin; +pub mod tx_history_storage; +pub mod utxo; +pub mod watcher_common; +pub mod z_coin; + +use crate::coin_errors::ValidatePaymentResult; +use crate::utxo::swap_proto_v2_scripts; +use crate::utxo::utxo_common::{payment_script, WaitForOutputSpendErr}; +use coin_errors::{MyAddressError, ValidatePaymentError, ValidatePaymentFut}; +use eth::GetValidEthWithdrawAddError; +use eth::{eth_coin_from_conf_and_request, get_eth_address, EthCoin, EthGasDetailsErr, EthTxFeeDetails, + GetEthAddressError, SignedEthTx}; +use ethereum_types::U256; +use hd_wallet::{HDAccountAddressId, HDAddress}; +use nft::nft_errors::GetNftInfoError; +use qrc20::{qrc20_coin_with_policy, Qrc20ActivationParams, Qrc20Coin, Qrc20FeeDetails}; +use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskManagerShared}, + init_account_balance::{AccountBalanceTaskManager, AccountBalanceTaskManagerShared}, + init_create_account::{CreateAccountTaskManager, 
CreateAccountTaskManagerShared}, + init_scan_for_new_addresses::{ScanAddressesTaskManager, ScanAddressesTaskManagerShared}, + init_withdraw::{WithdrawTaskManager, WithdrawTaskManagerShared}}; +use script::Script; #[cfg(all( feature = "enable-solana", not(target_os = "ios"), @@ -284,9 +270,10 @@ pub use solana::spl::SplToken; not(target_arch = "wasm32") ))] pub use solana::{SolanaActivationParams, SolanaCoin, SolanaFeeDetails}; - -pub mod utxo; - +use tendermint::htlc::CustomTendermintMsgType; +use tendermint::{CosmosTransaction, TendermintCoin, TendermintFeeDetails, TendermintProtocolInfo, TendermintToken, + TendermintTokenProtocolInfo}; +pub use test_coin::TestCoin; use utxo::bch::{bch_coin_with_policy, BchActivationRequest, BchCoin}; use utxo::qtum::{self, qtum_coin_with_policy, Qrc20AddressError, QtumCoin, QtumDelegationOps, QtumDelegationRequest, QtumStakingInfosDetails, ScriptHashTypeNotSupported}; @@ -297,17 +284,6 @@ use utxo::utxo_common::big_decimal_from_sat_unsigned; use utxo::utxo_standard::{utxo_standard_coin_with_policy, UtxoStandardCoin}; use utxo::UtxoActivationParams; use utxo::{BlockchainNetwork, GenerateTxError, UtxoFeeDetails, UtxoTx}; - -pub mod nft; - -use nft::nft_errors::GetNftInfoError; -use script::Script; - -pub mod z_coin; - -use crate::coin_errors::ValidatePaymentResult; -use crate::utxo::swap_proto_v2_scripts; -use crate::utxo::utxo_common::{payment_script, WaitForOutputSpendErr}; use z_coin::{ZCoin, ZcoinProtocolInfo}; pub type TransactionFut = Box + Send>; From e508b59080cd5566b91d424abbb75836022088db Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 23 Apr 2024 12:11:12 +0100 Subject: [PATCH 067/186] wip lp_ordermatch db update --- mm2src/mm2_main/src/lp_native_dex.rs | 12 ++---- mm2src/mm2_main/src/lp_ordermatch.rs | 5 +-- .../src/lp_ordermatch/my_orders_storage.rs | 40 ++++++++++--------- 3 files changed, 27 insertions(+), 30 deletions(-) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index ccf7408bfd..5afb9671ec 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -504,14 +504,10 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes info!("Version: {} DT {}", version, datetime); #[cfg(not(target_arch = "wasm32"))] - { - // Todo: Handle properly - let dbdir = ctx.dbdir(None); - fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { - path: dbdir.clone(), - error: e.to_string(), - })?; - } + fs::create_dir_all(ctx.dbdir(None)).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { + path: ctx.dbdir(None), + error: e.to_string(), + })?; // This either initializes the cryptographic context or sets up the context for "no login mode". 
initialize_wallet_passphrase(&ctx).await?; diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 84706995d5..e44928f21c 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2818,9 +2818,8 @@ impl OrdermatchContext { } #[cfg(target_arch = "wasm32")] - pub async fn ordermatch_db(&self) -> InitDbResult> { - // TODO - self.ordermatch_db.get_or_initialize(None).await + pub async fn ordermatch_db(&self, db_id: Option<&str>) -> InitDbResult> { + self.ordermatch_db.get_or_initialize(db_id).await } } diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index d065a517b5..2501768722 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -408,12 +408,14 @@ mod wasm_impl { #[derive(Clone)] pub struct MyOrdersStorage { ctx: Arc, + db_id: Option, } impl MyOrdersStorage { - pub fn new(ctx: MmArc, _db_id: Option<&str>) -> MyOrdersStorage { + pub fn new(ctx: MmArc, db_id: Option<&str>) -> MyOrdersStorage { MyOrdersStorage { ctx: OrdermatchContext::from_ctx(&ctx).expect("!OrdermatchContext::from_ctx"), + db_id: db_id.map(|e| e.to_string()), } } } @@ -421,7 +423,7 @@ mod wasm_impl { #[async_trait] impl MyActiveOrders for MyOrdersStorage { async fn load_active_maker_orders(&self) -> MyOrdersResult> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let maker_orders = table.get_all_items().await?; @@ -432,7 +434,7 @@ mod wasm_impl { } async fn load_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -444,7 +446,7 @@ mod wasm_impl { } async fn load_active_taker_orders(&self) -> MyOrdersResult> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let maker_orders = table.get_all_items().await?; @@ -455,7 +457,7 @@ mod wasm_impl { } async fn save_new_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -468,7 +470,7 @@ mod wasm_impl { } async fn save_new_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -481,7 +483,7 @@ mod wasm_impl { } async fn delete_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.delete_item_by_unique_index("uuid", uuid).await?; @@ -489,7 +491,7 @@ mod wasm_impl { } async fn delete_active_taker_order(&self, uuid: Uuid) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; 
let transaction = db.transaction().await?; let table = transaction.table::().await?; table.delete_item_by_unique_index("uuid", uuid).await?; @@ -497,7 +499,7 @@ mod wasm_impl { } async fn update_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -510,7 +512,7 @@ mod wasm_impl { } async fn update_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -528,7 +530,7 @@ mod wasm_impl { #[async_trait] impl MyOrdersHistory for MyOrdersStorage { async fn save_order_in_history(&self, order: &Order) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -541,7 +543,7 @@ mod wasm_impl { } async fn load_order_from_history(&self, uuid: Uuid) -> MyOrdersResult { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -567,7 +569,7 @@ mod wasm_impl { } async fn select_order_status(&self, uuid: Uuid) -> MyOrdersResult { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -581,7 +583,7 @@ mod wasm_impl { async fn save_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { let item = maker_order_to_filtering_history_item(order, "Created".to_owned(), false)?; - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.add_item(&item).await?; @@ -591,7 +593,7 @@ mod wasm_impl { async fn save_taker_order_in_filtering_history(&self, order: &TakerOrder) -> MyOrdersResult<()> { let item = taker_order_to_filtering_history_item(order, "Created".to_owned())?; - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.add_item(&item).await?; @@ -599,7 +601,7 @@ mod wasm_impl { } async fn update_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; // get the previous item to see if the order was taker @@ -614,7 +616,7 @@ mod wasm_impl { } async fn update_order_status_in_filtering_history(&self, uuid: Uuid, status: String) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -629,7 +631,7 @@ mod wasm_impl { } async fn update_was_taker_in_filtering_history(&self, uuid: Uuid) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db().await?; + let db = 
self.ctx.ordermatch_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -776,7 +778,7 @@ mod tests { async fn get_all_items(ctx: &MmArc) -> Vec { let ordermatch_ctx = OrdermatchContext::from_ctx(ctx).unwrap(); - let db = ordermatch_ctx.ordermatch_db().await.unwrap(); + let db = ordermatch_ctx.ordermatch_db(None).await.unwrap(); let transaction = db.transaction().await.unwrap(); let table = transaction.table::
().await.unwrap(); table From d7c4a831eedccc8518e7c2be5daff7a15797007e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 24 Apr 2024 04:35:45 +0100 Subject: [PATCH 068/186] orders_kick_start --- mm2src/mm2_main/src/lp_ordermatch.rs | 343 ++++++++++++++------------- 1 file changed, 178 insertions(+), 165 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index e44928f21c..32c2382ae3 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -25,11 +25,10 @@ use best_orders::BestOrdersAction; use blake2::digest::{Update, VariableOutput}; use blake2::Blake2bVar; use coins::utxo::{compressed_pub_key_from_priv_raw, ChecksumType, UtxoAddressFormat}; -use coins::{coin_conf, find_pair, lp_coinfind, BalanceTradeFeeUpdatedHandler, CoinProtocol, CoinsContext, - FeeApproxStage, MarketCoinOps, MmCoinEnum}; +use coins::{coin_conf, find_pair, lp_coinfind, BalanceTradeFeeUpdatedHandler, CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum, find_unique_account_ids_active}; use common::executor::{simple_map::AbortableSimpleMap, AbortSettings, AbortableSystem, AbortedError, SpawnAbortable, SpawnFuture, Timer}; -use common::log::{error, warn, LogOnError}; +use common::log::{error, warn, LogOnError, info}; use common::time_cache::TimeCache; use common::{bits256, log, new_uuid, now_ms, now_sec}; use crypto::privkey::SerializableSecp256k1Keypair; @@ -49,7 +48,8 @@ use mm2_number::{BigDecimal, BigRational, MmNumber, MmNumberMultiRepr}; use mm2_rpc::data::legacy::{MatchBy, Mm2RpcResult, OrderConfirmationsSettings, OrderType, RpcOrderbookEntry, SellBuyRequest, SellBuyResponse, TakerAction, TakerRequestForRpc}; use mm2_state_machine::prelude::*; -#[cfg(test)] use mocktopus::macros::*; +#[cfg(test)] +use mocktopus::macros::*; use my_orders_storage::{delete_my_maker_order, delete_my_taker_order, save_maker_order_on_update, save_my_new_maker_order, save_my_new_taker_order, MyActiveOrders, MyOrdersFilteringHistory, MyOrdersHistory, MyOrdersStorage}; @@ -95,19 +95,24 @@ cfg_wasm32! 
{ pub type OrdermatchDbLocked<'a> = DbLocked<'a, OrdermatchDb>; } -#[path = "lp_ordermatch/best_orders.rs"] mod best_orders; -#[path = "lp_ordermatch/lp_bot.rs"] mod lp_bot; +#[path = "lp_ordermatch/best_orders.rs"] +mod best_orders; +#[path = "lp_ordermatch/lp_bot.rs"] +mod lp_bot; pub use lp_bot::{start_simple_market_maker_bot, stop_simple_market_maker_bot, StartSimpleMakerBotRequest, TradingBotEvent}; #[path = "lp_ordermatch/my_orders_storage.rs"] mod my_orders_storage; -#[path = "lp_ordermatch/new_protocol.rs"] mod new_protocol; +#[path = "lp_ordermatch/new_protocol.rs"] +mod new_protocol; #[path = "lp_ordermatch/order_requests_tracker.rs"] mod order_requests_tracker; -#[path = "lp_ordermatch/orderbook_depth.rs"] mod orderbook_depth; -#[path = "lp_ordermatch/orderbook_rpc.rs"] mod orderbook_rpc; +#[path = "lp_ordermatch/orderbook_depth.rs"] +mod orderbook_depth; +#[path = "lp_ordermatch/orderbook_rpc.rs"] +mod orderbook_rpc; #[cfg(all(test, not(target_arch = "wasm32")))] #[path = "ordermatch_tests.rs"] pub mod ordermatch_tests; @@ -153,8 +158,8 @@ pub enum OrderbookP2PHandlerError { P2PRequestError(String), #[display( - fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", - _0 + fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", + _0 )] OrderNotFound(Uuid), @@ -272,7 +277,7 @@ fn process_trie_delta( )), None => { orderbook.remove_order_trie_update(uuid); - }, + } } } @@ -311,13 +316,13 @@ async fn process_orders_keep_alive( P2PRequest::Ordermatch(req), propagated_from_peer.clone(), ) - .await? - .ok_or_else(|| { - MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( - "No response was received from peer {} for SyncPubkeyOrderbookState request!", - propagated_from_peer - ))) - })?; + .await? 
+ .ok_or_else(|| { + MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( + "No response was received from peer {} for SyncPubkeyOrderbookState request!", + propagated_from_peer + ))) + })?; let mut orderbook = ordermatch_ctx.orderbook.lock(); for (pair, diff) in response.pair_orders_diff { @@ -393,13 +398,13 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul let response = try_s!(request_any_relay::(ctx.clone(), P2PRequest::Ordermatch(request)).await); let (pubkey_orders, protocol_infos, conf_infos) = match response { Some(( - GetOrderbookRes { - pubkey_orders, - protocol_infos, - conf_infos, - }, - _peer_id, - )) => (pubkey_orders, protocol_infos, conf_infos), + GetOrderbookRes { + pubkey_orders, + protocol_infos, + conf_infos, + }, + _peer_id, + )) => (pubkey_orders, protocol_infos, conf_infos), None => return Ok(()), }; @@ -415,7 +420,7 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul Err(e) => { warn!("Error {} decoding pubkey {}", e, pubkey); continue; - }, + } }; if is_my_order(&pubkey, &my_pubsecp, &orderbook.my_p2p_pubkeys) { @@ -481,9 +486,9 @@ fn delete_my_order(ctx: &MmArc, uuid: Uuid, p2p_privkey: Option(ctx: &MmArc, err_construct: F) -> MmResult, E> -where - E: NotMmError, - F: Fn(String) -> E, + where + E: NotMmError, + F: Fn(String) -> E, { match CryptoCtx::from_ctx(ctx).split_mm() { Ok(crypto_ctx) => Ok(Some(CryptoCtx::mm2_internal_pubkey_hex(crypto_ctx.as_ref()))), @@ -556,39 +561,39 @@ pub async fn process_msg(ctx: MmArc, from_peer: String, msg: &[u8], i_am_relay: let order: OrderbookItem = (created_msg, hex::encode(pubkey.to_bytes().as_slice())).into(); insert_or_update_order(&ctx, order); Ok(()) - }, + } new_protocol::OrdermatchMessage::PubkeyKeepAlive(keep_alive) => { process_orders_keep_alive(ctx, from_peer, pubkey.to_hex(), keep_alive, i_am_relay).await - }, + } new_protocol::OrdermatchMessage::TakerRequest(taker_request) => { let msg = TakerRequest::from_new_proto_and_pubkey(taker_request, pubkey.unprefixed().into()); process_taker_request(ctx, pubkey.unprefixed().into(), msg).await; Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerReserved(maker_reserved) => { let msg = MakerReserved::from_new_proto_and_pubkey(maker_reserved, pubkey.unprefixed().into()); // spawn because process_maker_reserved may take significant time to run let spawner = ctx.spawner(); spawner.spawn(process_maker_reserved(ctx, pubkey.unprefixed().into(), msg)); Ok(()) - }, + } new_protocol::OrdermatchMessage::TakerConnect(taker_connect) => { process_taker_connect(ctx, pubkey, taker_connect.into()).await; Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerConnected(maker_connected) => { process_maker_connected(ctx, pubkey, maker_connected.into()).await; Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerOrderCancelled(cancelled_msg) => { delete_order(&ctx, &pubkey.to_hex(), cancelled_msg.uuid.into()); Ok(()) - }, + } new_protocol::OrdermatchMessage::MakerOrderUpdated(updated_msg) => { process_maker_order_updated(ctx, pubkey.to_hex(), updated_msg) - }, + } } - }, + } Err(e) => MmError::err(OrderbookP2PHandlerError::DecodeError(e.to_string())), } } @@ -630,8 +635,8 @@ impl From for TryFromBytesError { trait TryFromBytes { fn try_from_bytes(bytes: Vec) -> Result - where - Self: Sized; + where + Self: Sized; } impl TryFromBytes for String { @@ -665,13 +670,13 @@ pub fn process_peer_request(ctx: MmArc, request: OrdermatchRequest) -> Result { let response = process_sync_pubkey_orderbook_state(ctx, pubkey, 
trie_roots); response.map(|res| res.map(|r| encode_message(&r).expect("Serialization failed"))) - }, + } OrdermatchRequest::BestOrders { coin, action, volume } => { best_orders::process_best_orders_p2p_request(ctx, coin, action, volume) - }, + } OrdermatchRequest::BestOrdersByNumber { coin, action, number } => { best_orders::process_best_orders_p2p_request_by_number(ctx, coin, action, number) - }, + } OrdermatchRequest::OrderbookDepth { pairs } => orderbook_depth::process_orderbook_depth_p2p_request(ctx, pairs), } } @@ -737,7 +742,7 @@ fn get_pubkeys_orders(orderbook: &Orderbook, base: String, rel: String) -> GetPu uuid ); continue; - }, + } }; let uuids = uuids_by_pubkey.entry(order.pubkey.clone()).or_insert_with(Vec::new); protocol_infos.insert(order.uuid, order.base_rel_proto_info()); @@ -809,12 +814,12 @@ impl DeltaOrFullTrie { .map(|(key, value)| (key, value.map(From::from))) .collect(); DeltaOrFullTrie::Delta(new_map) - }, + } DeltaOrFullTrie::FullTrie(trie) => { trie.iter().for_each(|(key, val)| on_each(key, Some(val))); let new_trie = trie.into_iter().map(|(key, value)| (key, value.into())).collect(); DeltaOrFullTrie::FullTrie(new_trie) - }, + } } } } @@ -845,8 +850,8 @@ fn get_full_trie( db: &MemoryDB, getter: impl Fn(&Key) -> Option, ) -> Result, TrieDiffHistoryError> -where - Key: Clone + Eq + std::hash::Hash + TryFromBytes, + where + Key: Clone + Eq + std::hash::Hash + TryFromBytes, { let trie = TrieDB::::new(db, trie_root)?; let trie: Result, TrieDiffHistoryError> = trie @@ -925,10 +930,10 @@ fn process_sync_pubkey_orderbook_state( let delta_result = match pubkey_state.order_pairs_trie_state_history.get(&pair) { Some(history) => { DeltaOrFullTrie::from_history(history, root, *actual_pair_root, &orderbook.memory_db, order_getter) - }, + } None => { get_full_trie(actual_pair_root, &orderbook.memory_db, order_getter).map(DeltaOrFullTrie::FullTrie) - }, + } }; let delta = try_s!(delta_result); @@ -951,11 +956,11 @@ fn process_sync_pubkey_orderbook_state( if let Some(ref info) = o.conf_settings { conf_infos.insert(o.uuid, info.clone()); } - }, + } None => { protocol_infos.remove(uuid); conf_infos.remove(uuid); - }, + } }); (pair, new_trie) }) @@ -1005,10 +1010,10 @@ pub fn parse_orderbook_pair_from_topic(topic: &str) -> Option<(&str, &str)> { } else { None } - }, + } None => None, } - }, + } None => None, }, _ => None, @@ -1051,7 +1056,7 @@ fn maker_order_created_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_created' message: {}", e); return; - }, + } }; let item: OrderbookItem = (message, hex::encode(key_pair.public_slice())).into(); insert_or_update_my_order(&ctx, item, order); @@ -1082,7 +1087,7 @@ fn maker_order_updated_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_updated' message: {}", e); return; - }, + } }; process_my_maker_order_updated(&ctx, &message); broadcast_p2p_msg(&ctx, topic, encoded_msg, peer_id); @@ -1129,7 +1134,7 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { Err(e) => { log::warn!("Couldn't handle the 'balance_updated' event: {}", e); return; - }, + } }; let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); @@ -1152,9 +1157,9 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { order.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); continue; } } @@ -1599,12 +1604,12 @@ impl TakerOrder { if !uuids.contains(&reserved.maker_order_uuid) { return MatchReservedResult::NotMatched; 
} - }, + } MatchBy::Pubkeys(pubkeys) => { if !pubkeys.contains(&reserved.sender_pubkey) { return MatchReservedResult::NotMatched; } - }, + } } let my_base_amount = self.request.get_base_amount(); @@ -1622,18 +1627,18 @@ impl TakerOrder { } else { MatchReservedResult::NotMatched } - }, + } TakerAction::Sell => { let match_ticker = (self.request.base == reserved.rel || self.base_orderbook_ticker.as_ref() == Some(&reserved.rel)) && (self.request.rel == reserved.base - || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); + || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); if match_ticker && my_base_amount == other_rel_amount && my_rel_amount <= other_base_amount { MatchReservedResult::Matched } else { MatchReservedResult::NotMatched } - }, + } } } @@ -2041,7 +2046,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - }, + } TakerAction::Sell => { let ticker_match = (self.base == taker.rel || self.base_orderbook_ticker.as_ref() == Some(&taker.rel)) && (self.rel == taker.base || self.rel_orderbook_ticker.as_ref() == Some(&taker.base)); @@ -2060,7 +2065,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - }, + } } } @@ -2141,7 +2146,7 @@ impl From for MakerOrder { rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, } - }, + } } } } @@ -2339,7 +2344,7 @@ fn broadcast_ordermatch_message( Err(e) => { error!("Failed to encode and sign ordermatch message: {}", e); return; - }, + } }; broadcast_p2p_msg(ctx, topic, encoded_msg, peer_id); } @@ -2387,10 +2392,10 @@ impl TrieDiffHistory { while let Some(next_diff) = self.inner.remove(diff.next_root) { diff = next_diff; } - }, + } None => { self.inner.insert(insert_at, diff); - }, + } }; } @@ -2451,7 +2456,7 @@ fn pubkey_state_mut<'a>( RawEntryMut::Vacant(e) => { let state = OrderbookPubkeyState::with_history_timeout(Duration::new(TRIE_STATE_HISTORY_TIMEOUT, 0)); e.insert(from_pubkey.to_string(), state).1 - }, + } } } @@ -2543,7 +2548,7 @@ impl Orderbook { Err(e) => { error!("Error getting {} trie with root {:?}", e, prev_root); return; - }, + } }; let order_bytes = order.trie_state_bytes(); if let Err(e) = pair_trie.insert(order.uuid.as_bytes(), &order_bytes) { @@ -2652,7 +2657,7 @@ impl Orderbook { Err(_) => { error!("Failed to get existing trie with root {:?}", pair_state); return Some(order); - }, + } }; if pubkey_state.order_pairs_trie_state_history.get(&alb_ordered).is_some() { @@ -2903,11 +2908,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.rel); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", maker_order.rel, e); return; - }, + } }; let maker_coin = match lp_coinfind(&ctx, &maker_order.base).await { @@ -2915,11 +2920,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.base); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", maker_order.base, e); return; - }, + } }; let alice = bits256::from(maker_match.request.sender_pubkey.0); let maker_amount = maker_match.reserved.get_base_amount().clone(); @@ -2949,7 +2954,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO my_conf_settings, other_conf_settings, } - }, + } None => AtomicLocktimeVersion::V1, }; let lock_time = lp_atomic_locktime( @@ -2973,7 +2978,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Err(e) => { 
error!("Error {} on secret generation", e); return; - }, + } }; let account_db_id = maker_coin.account_db_id(); @@ -3011,7 +3016,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO .run(Box::new(maker_swap_v2::Initialize::default())) .await .error_log(); - }, + } _ => todo!("implement fallback to the old protocol here"), } } else { @@ -3024,7 +3029,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3064,11 +3069,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", taker_coin_ticker); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", taker_coin_ticker, e); return; - }, + } }; let maker_coin_ticker = taker_order.maker_coin_ticker(); @@ -3077,11 +3082,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", maker_coin_ticker); return; - }, + } Err(e) => { error!("!lp_coinfind({}): {}", maker_coin_ticker, e); return; - }, + } }; // lp_connected_alice is called only from process_maker_connected, which returns if CryptoCtx is not initialized @@ -3111,7 +3116,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat my_conf_settings, other_conf_settings, } - }, + } None => AtomicLocktimeVersion::V1, }; let locktime = lp_atomic_locktime( @@ -3136,7 +3141,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Err(e) => { error!("Error {} on secret generation", e); return; - }, + } }; let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); match (maker_coin, taker_coin) { @@ -3172,12 +3177,12 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat .run(Box::new(taker_swap_v2::Initialize::default())) .await .error_log(); - }, + } _ => todo!("implement fallback to the old protocol here"), } } else { #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); if let Err(e) = insert_new_swap_to_db( ctx.clone(), @@ -3188,7 +3193,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3207,7 +3212,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat locktime, taker_order.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); run_taker_swap(RunTakerSwapInput::StartNew(taker_swap), ctx).await } @@ -3288,7 +3293,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - }, + } }; let max_vol = match calc_max_maker_vol(&ctx, &base, ¤t_balance, FeeApproxStage::OrderIssue).await { @@ -3297,7 +3302,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - }, + } }; if max_vol < order.available_amount() { order.max_base_vol = order.reserved_amount() + max_vol; @@ -3333,9 +3338,9 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { order.clone(), 
MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); } } } @@ -3553,11 +3558,11 @@ async fn process_maker_reserved(ctx: MmArc, from_pubkey: H256Json, reserved_msg: if (my_order.match_reserved(&reserved_msg) == MatchReservedResult::Matched && my_order.matches.is_empty()) && base_coin.is_coin_protocol_supported(&reserved_msg.base_protocol_info, None, lock_time, false) && rel_coin.is_coin_protocol_supported( - &reserved_msg.rel_protocol_info, - Some(reserved_msg.rel_amount.clone()), - lock_time, - false, - ) + &reserved_msg.rel_protocol_info, + Some(reserved_msg.rel_amount.clone()), + lock_time, + false, + ) { let connect = TakerConnect { sender_pubkey: H256Json::from(our_public_id.bytes), @@ -3615,7 +3620,7 @@ async fn process_maker_connected(ctx: MmArc, from_pubkey: PublicKey, connected: connected.maker_order_uuid ); return; - }, + } }; if order_match.reserved.sender_pubkey != unprefixed_from.into() { @@ -3683,21 +3688,21 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: atomic_locktime_v, ) as f64 * rel_coin.maker_locktime_multiplier()) - .ceil() as u64; + .ceil() as u64; if !order.matches.contains_key(&taker_request.uuid) && base_coin.is_coin_protocol_supported( - taker_request.base_protocol_info_for_maker(), - Some(base_amount.clone()), - maker_lock_duration, - true, - ) + taker_request.base_protocol_info_for_maker(), + Some(base_amount.clone()), + maker_lock_duration, + true, + ) && rel_coin.is_coin_protocol_supported( - taker_request.rel_protocol_info_for_maker(), - None, - maker_lock_duration, - true, - ) + taker_request.rel_protocol_info_for_maker(), + None, + maker_lock_duration, + true, + ) { let reserved = MakerReserved { dest_pub_key: taker_request.sender_pubkey, @@ -3775,7 +3780,7 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg connect_msg.taker_order_uuid ); return; - }, + } }; if order_match.request.sender_pubkey != sender_unprefixed.into() { log::warn!("Connect message sender pubkey != request message sender pubkey"); @@ -4810,7 +4815,7 @@ pub async fn update_maker_order(ctx: &MmArc, req: MakerOrderUpdateReq) -> Result try_s!(validate_price(new_price.clone())); update_msg.with_new_price(new_price.clone().into()); new_price - }, + } None => order_before_update.price.clone(), }; @@ -4930,10 +4935,8 @@ struct OrderForRpcWithCancellationReason<'a> { pub async fn order_status(ctx: MmArc, req: Json) -> Result>, String> { let req: OrderStatusReq = try_s!(json::from_value(req)); - + let db_ids = find_unique_account_ids_active(&ctx).await?; let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(&ctx)); - // TODO db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); let maybe_order_mutex = ordermatch_ctx.maker_orders_ctx.lock().get_order(&req.uuid).cloned(); if let Some(order_mutex) = maybe_order_mutex { @@ -4958,16 +4961,22 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St .map_err(|e| ERRL!("{}", e)); } - let order = try_s!(storage.load_order_from_history(req.uuid).await); - let cancellation_reason = &try_s!(storage.select_order_status(req.uuid).await); + for db_id in db_ids { + let storage = MyOrdersStorage::new(ctx.clone(), Some(&db_id)); + if let (Ok(order), Ok(cancellation_reason)) = (storage.load_order_from_history(req.uuid).await, &storage.select_order_status(req.uuid).await) { + info!("Order with UUID=({}) found for db_id=({db_id})", req.uuid); + let res = json!(OrderForRpcWithCancellationReason { + order: 
OrderForRpc::from(&order), + cancellation_reason, + }); - let res = json!(OrderForRpcWithCancellationReason { - order: OrderForRpc::from(&order), - cancellation_reason, - }); - Response::builder() - .body(json::to_vec(&res).expect("Serialization failed")) - .map_err(|e| ERRL!("{}", e)) + return Response::builder() + .body(json::to_vec(&res).expect("Serialization failed")) + .map_err(|e| ERRL!("{}", e)); + }; + }; + + Err("No orders found across databases".to_string()) } #[derive(Display)] @@ -5078,13 +5087,13 @@ pub async fn orders_history_by_filter(ctx: MmArc, req: Json) -> Result Result (), } @@ -5238,7 +5247,7 @@ pub async fn cancel_order_rpc(ctx: MmArc, req: Json) -> Result> return Response::builder() .body(json::to_vec(&res).expect("Serialization failed")) .map_err(|e| ERRL!("{}", e)); - }, + } // error is returned Entry::Vacant(_) => (), } @@ -5399,26 +5408,30 @@ pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { let mut coins = HashSet::new(); let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(ctx)); - // TODO db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); - let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); - let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); + let db_ids = find_unique_account_ids_active(ctx).await?; + for db_id in db_ids { + let storage = MyOrdersStorage::new(ctx.clone(), Some(&db_id)); + let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); + let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); - { - let mut maker_orders_ctx = ordermatch_ctx.maker_orders_ctx.lock(); - for order in saved_maker_orders { - coins.insert(order.base.clone()); - coins.insert(order.rel.clone()); - maker_orders_ctx.add_order(ctx.weak(), order.clone(), None); + { + let mut maker_orders_ctx = ordermatch_ctx.maker_orders_ctx.lock(); + for order in saved_maker_orders { + coins.insert(order.base.clone()); + coins.insert(order.rel.clone()); + maker_orders_ctx.add_order(ctx.weak(), order.clone(), None); + } } - } - let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - for order in saved_taker_orders { - coins.insert(order.request.base.clone()); - coins.insert(order.request.rel.clone()); - taker_orders.insert(order.request.uuid, order); - } + let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; + for order in saved_taker_orders { + coins.insert(order.request.base.clone()); + coins.insert(order.request.rel.clone()); + taker_orders.insert(order.request.uuid, order); + } + }; + + Ok(coins) } @@ -5518,7 +5531,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5538,7 +5551,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5558,7 +5571,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec match e.get() { OrderbookRequestingState::Requested => { // We are subscribed to the topic and the orderbook was requested already true - }, + } OrderbookRequestingState::NotRequested { subscribed_at } => { // We are subscribed to the topic. Also we didn't request the orderbook, // True if enough time has passed for the orderbook to fill by OrdermatchRequest::SyncPubkeyOrderbookState. 
*subscribed_at + ORDERBOOK_REQUESTING_TIMEOUT < current_timestamp - }, + } }, } }; @@ -5702,7 +5715,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - }, + } TakerAction::Buy => { let maker_coin_confs = if taker_settings.base_confs < maker_settings.base_confs { taker_settings.base_confs @@ -5720,7 +5733,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - }, + } }, None => ( maker_settings.base_confs, @@ -5820,12 +5833,12 @@ fn orderbook_address( coins::eth::addr_from_pubkey_str(pubkey) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - }, + } CoinProtocol::UTXO | CoinProtocol::QTUM | CoinProtocol::QRC20 { .. } | CoinProtocol::BCH { .. } => { coins::utxo::address_by_conf_and_pubkey_str(coin, conf, pubkey, addr_format) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - }, + } CoinProtocol::SLPTOKEN { platform, .. } => { let platform_conf = coin_conf(ctx, &platform); if platform_conf.is_null() { @@ -5839,12 +5852,12 @@ fn orderbook_address( .mm_err(|e| OrderbookAddrErr::AddrFromPubkeyError(e.to_string())), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(platform)), } - }, + } CoinProtocol::TENDERMINT(protocol) => Ok(coins::tendermint::account_id_from_pubkey_hex( &protocol.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), CoinProtocol::TENDERMINTTOKEN(proto) => { let platform_conf = coin_conf(ctx, &proto.platform); if platform_conf.is_null() { @@ -5857,17 +5870,17 @@ fn orderbook_address( &platform.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(format!( "Platform protocol {:?} is not TENDERMINT", platform_protocol ))), } - }, + } #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA | CoinProtocol::SPLTOKEN { .. } => { MmError::err(OrderbookAddrErr::CoinIsNotSupported(coin.to_owned())) - }, + } CoinProtocol::ZHTLC { .. 
} => Ok(OrderbookAddress::Shielded), #[cfg(not(target_arch = "wasm32"))] // Todo: Shielded address is used for lightning for now, the lightning node public key can be used for the orderbook entry pubkey From 3a7fa66242929b15b8345e81f8fa9b500caea6a1 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 24 Apr 2024 05:15:43 +0100 Subject: [PATCH 069/186] wasm shareddb storage --- mm2src/coins/lp_coins.rs | 404 +++++++++--------- mm2src/mm2_db/src/indexed_db/db_lock.rs | 7 +- .../src/account/storage/wasm_storage.rs | 12 +- 3 files changed, 215 insertions(+), 208 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 390b73e27f..599197976b 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -21,10 +21,10 @@ // `mockable` implementation uses these #![allow( - clippy::forget_ref, - clippy::forget_copy, - clippy::swap_ptr_to_ref, - clippy::forget_non_drop +clippy::forget_ref, +clippy::forget_copy, +clippy::swap_ptr_to_ref, +clippy::forget_non_drop )] #![allow(uncommon_codepoints)] #![feature(integer_atomics)] @@ -33,13 +33,20 @@ #![feature(stmt_expr_attributes)] #![feature(result_flattening)] -#[macro_use] extern crate common; -#[macro_use] extern crate gstuff; -#[macro_use] extern crate lazy_static; -#[macro_use] extern crate mm2_metrics; -#[macro_use] extern crate serde_derive; -#[macro_use] extern crate serde_json; -#[macro_use] extern crate ser_error_derive; +#[macro_use] +extern crate common; +#[macro_use] +extern crate gstuff; +#[macro_use] +extern crate lazy_static; +#[macro_use] +extern crate mm2_metrics; +#[macro_use] +extern crate serde_derive; +#[macro_use] +extern crate serde_json; +#[macro_use] +extern crate ser_error_derive; use async_trait::async_trait; use base58::FromBase58Error; @@ -140,7 +147,7 @@ macro_rules! try_tx_fus { TransactionEnum::from($tx), ERRL!("{:?}", err), ))); - }, + } } }; } @@ -157,7 +164,7 @@ macro_rules! try_tx_s { line!(), err ))); - }, + } } }; ($e: expr, $tx: expr) => { @@ -168,7 +175,7 @@ macro_rules! try_tx_s { TransactionEnum::from($tx), format!("{}:{}] {:?}", file!(), line!(), err), )); - }, + } } }; } @@ -198,7 +205,7 @@ macro_rules! 
ok_or_continue_after_sleep { error!("error {:?}", e); Timer::sleep($delay).await; continue; - }, + } } }; } @@ -213,7 +220,8 @@ pub mod hd_confirm_address; pub mod hd_pubkey; pub mod hd_wallet; pub mod hd_wallet_storage; -#[cfg(not(target_arch = "wasm32"))] pub mod lightning; +#[cfg(not(target_arch = "wasm32"))] +pub mod lightning; pub mod lp_price; #[cfg_attr(target_arch = "wasm32", allow(dead_code, unused_imports))] pub mod my_tx_history_v2; @@ -223,10 +231,10 @@ pub mod rpc_command; #[doc(hidden)] #[allow(unused_variables)] #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") +feature = "enable-solana", +not(target_os = "ios"), +not(target_os = "android"), +not(target_arch = "wasm32") ))] pub mod solana; pub mod tendermint; @@ -257,17 +265,17 @@ use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskM init_withdraw::{WithdrawTaskManager, WithdrawTaskManagerShared}}; use script::Script; #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") +feature = "enable-solana", +not(target_os = "ios"), +not(target_os = "android"), +not(target_arch = "wasm32") ))] pub use solana::spl::SplToken; #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") +feature = "enable-solana", +not(target_os = "ios"), +not(target_os = "android"), +not(target_arch = "wasm32") ))] pub use solana::{SolanaActivationParams, SolanaCoin, SolanaFeeDetails}; use tendermint::htlc::CustomTendermintMsgType; @@ -286,26 +294,26 @@ use utxo::UtxoActivationParams; use utxo::{BlockchainNetwork, GenerateTxError, UtxoFeeDetails, UtxoTx}; use z_coin::{ZCoin, ZcoinProtocolInfo}; -pub type TransactionFut = Box + Send>; +pub type TransactionFut = Box + Send>; pub type TransactionResult = Result; pub type BalanceResult = Result>; -pub type BalanceFut = Box> + Send>; -pub type NonZeroBalanceFut = Box> + Send>; +pub type BalanceFut = Box> + Send>; +pub type NonZeroBalanceFut = Box> + Send>; pub type NumConversResult = Result>; pub type StakingInfosResult = Result>; -pub type StakingInfosFut = Box> + Send>; +pub type StakingInfosFut = Box> + Send>; pub type DelegationResult = Result>; -pub type DelegationFut = Box> + Send>; +pub type DelegationFut = Box> + Send>; pub type WithdrawResult = Result>; -pub type WithdrawFut = Box> + Send>; +pub type WithdrawFut = Box> + Send>; pub type TradePreimageResult = Result>; -pub type TradePreimageFut = Box> + Send>; +pub type TradePreimageFut = Box> + Send>; pub type CoinFindResult = Result>; -pub type TxHistoryFut = Box> + Send>; +pub type TxHistoryFut = Box> + Send>; pub type TxHistoryResult = Result>; pub type RawTransactionResult = Result>; pub type RawTransactionFut<'a> = - Box> + Send + 'a>; +Box> + Send + 'a>; pub type RefundResult = Result>; /// Helper type used for swap transactions' spend preimage generation result pub type GenPreimageResult = MmResult, TxGenError>; @@ -363,7 +371,7 @@ impl HttpStatusCode for RawTransactionError { match self { RawTransactionError::InternalError(_) | RawTransactionError::SigningError(_) => { StatusCode::INTERNAL_SERVER_ERROR - }, + } RawTransactionError::NoSuchCoin { .. 
} | RawTransactionError::InvalidHashError(_) | RawTransactionError::HashNotExist(_) @@ -412,7 +420,7 @@ impl HttpStatusCode for GetMyAddressError { | GetMyAddressError::InvalidRequest(_) => StatusCode::BAD_REQUEST, GetMyAddressError::Internal(_) | GetMyAddressError::GetEthAddressError(_) => { StatusCode::INTERNAL_SERVER_ERROR - }, + } } } } @@ -529,8 +537,8 @@ pub enum PrivKeyPolicyNotAllowed { impl Serialize for PrivKeyPolicyNotAllowed { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, + where + S: Serializer, { serializer.serialize_str(&self.to_string()) } @@ -829,10 +837,10 @@ impl<'a> SwapTxTypeWithSecretHash<'a> { match self { SwapTxTypeWithSecretHash::TakerOrMakerPayment { maker_secret_hash } => { payment_script(time_lock, maker_secret_hash, my_public, other_public) - }, + } SwapTxTypeWithSecretHash::TakerFunding { taker_secret_hash } => { swap_proto_v2_scripts::taker_funding_script(time_lock, taker_secret_hash, my_public, other_public) - }, + } SwapTxTypeWithSecretHash::MakerPaymentV2 { maker_secret_hash, taker_secret_hash, @@ -845,7 +853,7 @@ impl<'a> SwapTxTypeWithSecretHash<'a> { ), SwapTxTypeWithSecretHash::TakerPaymentV2 { maker_secret_hash } => { swap_proto_v2_scripts::taker_payment_script(time_lock, maker_secret_hash, my_public, other_public) - }, + } } } @@ -1050,7 +1058,7 @@ pub trait SwapOps { fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, - ) -> Box, Error = String> + Send>; + ) -> Box, Error=String> + Send>; async fn search_for_swap_tx_spend_my( &self, @@ -1074,7 +1082,7 @@ pub trait SwapOps { /// Whether the refund transaction can be sent now /// For example: there are no additional conditions for ETH, but for some UTXO coins we should wait for /// locktime < MTP - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { + fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { let now = now_sec(); let result = if now > locktime { CanRefundHtlc::CanRefundNow @@ -1663,9 +1671,9 @@ pub trait MakerNftSwapOpsV2: ParseCoinAssocTypes + ParseNftAssocTypes + Send + S pub enum WaitForTakerPaymentSpendError { /// Timeout error variant, indicating that the wait for taker payment spend has timed out. #[display( - fmt = "Timed out waiting for taker payment spend, wait_until {}, now {}", - wait_until, - now + fmt = "Timed out waiting for taker payment spend, wait_until {}, now {}", + wait_until, + now )] Timeout { /// The timestamp until which the wait was expected to complete. 
@@ -1683,10 +1691,10 @@ impl From for WaitForTakerPaymentSpendError { match err { WaitForOutputSpendErr::Timeout { wait_until, now } => { WaitForTakerPaymentSpendError::Timeout { wait_until, now } - }, + } WaitForOutputSpendErr::NoOutputWithIndex(index) => { WaitForTakerPaymentSpendError::InvalidInputTx(format!("Tx doesn't have output with index {}", index)) - }, + } } } } @@ -1714,13 +1722,13 @@ impl fmt::Debug for FundingTxSpend { match self { FundingTxSpend::RefundedTimelock(tx) => { write!(f, "RefundedTimelock({:?})", tx) - }, + } FundingTxSpend::RefundedSecret { tx, secret: _ } => { write!(f, "RefundedSecret {{ tx: {:?} }}", tx) - }, + } FundingTxSpend::TransferredToTakerPayment(tx) => { write!(f, "TransferredToTakerPayment({:?})", tx) - }, + } } } } @@ -1864,21 +1872,21 @@ pub trait MarketCoinOps { fn platform_ticker(&self) -> &str; /// Receives raw transaction bytes in hexadecimal format as input and returns tx hash in hexadecimal format - fn send_raw_tx(&self, tx: &str) -> Box + Send>; + fn send_raw_tx(&self, tx: &str) -> Box + Send>; /// Receives raw transaction bytes as input and returns tx hash in hexadecimal format - fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send>; + fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send>; /// Signs raw utxo transaction in hexadecimal format as input and returns signed transaction in hexadecimal format async fn sign_raw_tx(&self, args: &SignRawTransactionRequest) -> RawTransactionResult; - fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send>; + fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send>; fn wait_for_htlc_tx_spend(&self, args: WaitForHTLCTxSpendArgs<'_>) -> TransactionFut; fn tx_enum_from_bytes(&self, bytes: &[u8]) -> Result>; - fn current_block(&self) -> Box + Send>; + fn current_block(&self) -> Box + Send>; fn display_priv_key(&self) -> Result; @@ -2064,10 +2072,10 @@ pub enum TxFeeDetails { Slp(SlpFeeDetails), Tendermint(TendermintFeeDetails), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] Solana(SolanaFeeDetails), } @@ -2075,8 +2083,8 @@ pub enum TxFeeDetails { /// Deserialize the TxFeeDetails as an untagged enum. 
impl<'de> Deserialize<'de> for TxFeeDetails { fn deserialize(deserializer: D) -> Result>::Error> - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { #[derive(Deserialize)] #[serde(untagged)] @@ -2085,10 +2093,10 @@ impl<'de> Deserialize<'de> for TxFeeDetails { Eth(EthTxFeeDetails), Qrc20(Qrc20FeeDetails), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] Solana(SolanaFeeDetails), Tendermint(TendermintFeeDetails), @@ -2099,10 +2107,10 @@ impl<'de> Deserialize<'de> for TxFeeDetails { TxFeeDetailsUnTagged::Eth(f) => Ok(TxFeeDetails::Eth(f)), TxFeeDetailsUnTagged::Qrc20(f) => Ok(TxFeeDetails::Qrc20(f)), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] TxFeeDetailsUnTagged::Solana(f) => Ok(TxFeeDetails::Solana(f)), TxFeeDetailsUnTagged::Tendermint(f) => Ok(TxFeeDetails::Tendermint(f)), @@ -2123,10 +2131,10 @@ impl From for TxFeeDetails { } #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") +feature = "enable-solana", +not(target_os = "ios"), +not(target_os = "android"), +not(target_arch = "wasm32") ))] impl From for TxFeeDetails { fn from(solana_details: SolanaFeeDetails) -> Self { TxFeeDetails::Solana(solana_details) } @@ -2305,10 +2313,10 @@ pub enum TradePreimageValue { #[derive(Debug, Display, EnumFromStringify, PartialEq)] pub enum TradePreimageError { #[display( - fmt = "Not enough {} to preimage the trade: available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to preimage the trade: available {}, required at least {}", + coin, + available, + required )] NotSufficientBalance { coin: String, @@ -2342,7 +2350,7 @@ impl TradePreimageError { available: BigDecimal::from(0), required, } - }, + } GenerateTxError::EmptyOutputs => TradePreimageError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { if is_upper_bound { @@ -2366,7 +2374,7 @@ impl TradePreimageError { let threshold = big_decimal_from_sat_unsigned(dust, decimals); TradePreimageError::AmountIsTooSmall { amount, threshold } } - }, + } GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -2377,7 +2385,7 @@ impl TradePreimageError { available, required, } - }, + } GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2386,7 +2394,7 @@ impl TradePreimageError { available, required, } - }, + } GenerateTxError::Transport(e) => TradePreimageError::Transport(e), GenerateTxError::Internal(e) => TradePreimageError::InternalError(e), } @@ -2459,7 +2467,7 @@ impl From for StakingInfosError { match e { UtxoRpcError::Transport(rpc) | UtxoRpcError::ResponseParseError(rpc) => { StakingInfosError::Transport(rpc.to_string()) - }, + } UtxoRpcError::InvalidResponse(error) => StakingInfosError::Transport(error), UtxoRpcError::Internal(error) => StakingInfosError::Internal(error), } @@ -2472,7 +2480,7 @@ impl From for StakingInfosError { Qrc20AddressError::UnexpectedDerivationMethod(e) => StakingInfosError::UnexpectedDerivationMethod(e), Qrc20AddressError::ScriptHashTypeNotSupported { script_hash_type } => { StakingInfosError::Internal(format!("Script hash type '{}' is not supported", script_hash_type)) - }, + } } } } @@ -2501,10 +2509,10 @@ impl From for StakingInfosError { #[serde(tag = "error_type", content = "error_data")] pub enum DelegationError { #[display( - fmt = "Not enough {} to delegate: available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to delegate: available {}, required at least {}", + coin, + available, + required )] NotSufficientBalance { coin: String, @@ -2538,7 +2546,7 @@ impl From for DelegationError { match e { UtxoRpcError::Transport(transport) | UtxoRpcError::ResponseParseError(transport) => { DelegationError::Transport(transport.to_string()) - }, + } UtxoRpcError::InvalidResponse(resp) => DelegationError::Transport(resp), UtxoRpcError::Internal(internal) => DelegationError::InternalError(internal), } @@ -2550,12 +2558,12 @@ impl From for DelegationError { match e { StakingInfosError::CoinDoesntSupportStakingInfos { coin } => { DelegationError::CoinDoesntSupportDelegation { coin } - }, + } StakingInfosError::NoSuchCoin { coin } => DelegationError::NoSuchCoin { coin }, StakingInfosError::Transport(e) => DelegationError::Transport(e), StakingInfosError::UnexpectedDerivationMethod(reason) => { DelegationError::DelegationOpsNotSupported { reason } - }, + } StakingInfosError::Internal(e) => DelegationError::InternalError(e), } } @@ -2575,7 +2583,7 @@ impl From for DelegationError { BalanceError::Transport(error) | BalanceError::InvalidResponse(error) => DelegationError::Transport(error), BalanceError::UnexpectedDerivationMethod(e) => { DelegationError::DelegationOpsNotSupported { reason: e.to_string() } - }, + } e @ BalanceError::WalletStorageError(_) => DelegationError::InternalError(e.to_string()), BalanceError::Internal(internal) => DelegationError::InternalError(internal), } @@ -2619,13 +2627,13 @@ impl DelegationError { available: BigDecimal::from(0), required, } - }, + } GenerateTxError::EmptyOutputs => DelegationError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { let amount = big_decimal_from_sat_unsigned(value, decimals); let threshold = big_decimal_from_sat_unsigned(dust, decimals); DelegationError::AmountTooLow { amount, threshold } - }, + } GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -2636,7 +2644,7 @@ impl DelegationError { available, required, } - }, + } GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2645,7 +2653,7 @@ impl DelegationError { available, required, } - }, + } GenerateTxError::Transport(e) => DelegationError::Transport(e), GenerateTxError::Internal(e) => DelegationError::InternalError(e), } @@ -2656,17 +2664,17 @@ impl DelegationError { #[serde(tag = "error_type", content = "error_data")] pub enum WithdrawError { #[display( - fmt = "'{}' coin doesn't support 'init_withdraw' yet. Consider using 'withdraw' request instead", - coin + fmt = "'{}' coin doesn't support 'init_withdraw' yet. Consider using 'withdraw' request instead", + coin )] CoinDoesntSupportInitWithdraw { coin: String, }, #[display( - fmt = "Not enough {} to withdraw: available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to withdraw: available {}, required at least {}", + coin, + available, + required )] NotSufficientBalance { coin: String, @@ -2674,10 +2682,10 @@ pub enum WithdrawError { required: BigDecimal, }, #[display( - fmt = "Not enough {} to afford fee. Available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to afford fee. Available {}, required at least {}", + coin, + available, + required )] NotSufficientPlatformBalanceForFee { coin: String, @@ -2724,10 +2732,10 @@ pub enum WithdrawError { Transport(String), #[from_trait(WithInternal::internal)] #[from_stringify( - "MyAddressError", - "NumConversError", - "UnexpectedDerivationMethod", - "PrivKeyPolicyNotAllowed" + "MyAddressError", + "NumConversError", + "UnexpectedDerivationMethod", + "PrivKeyPolicyNotAllowed" )] #[display(fmt = "Internal error: {}", _0)] InternalError(String), @@ -2743,11 +2751,11 @@ pub enum WithdrawError { ActionNotAllowed(String), GetNftInfoError(GetNftInfoError), #[display( - fmt = "Not enough NFTs amount with token_address: {} and token_id {}. Available {}, required {}", - token_address, - token_id, - available, - required + fmt = "Not enough NFTs amount with token_address: {} and token_id {}. Available {}, required {}", + token_address, + token_id, + available, + required )] NotEnoughNftsAmount { token_address: String, @@ -2795,7 +2803,7 @@ impl HttpStatusCode for WithdrawError { WithdrawError::BroadcastExpected(_) => StatusCode::BAD_REQUEST, WithdrawError::InternalError(_) | WithdrawError::DbError(_) | WithdrawError::NftProtocolNotSupported => { StatusCode::INTERNAL_SERVER_ERROR - }, + } WithdrawError::Transport(_) => StatusCode::BAD_GATEWAY, } } @@ -2836,7 +2844,7 @@ impl From for WithdrawError { match e { GetValidEthWithdrawAddError::CoinDoesntSupportNftWithdraw { coin } => { WithdrawError::CoinDoesntSupportNftWithdraw { coin } - }, + } GetValidEthWithdrawAddError::InvalidAddress(e) => WithdrawError::InvalidAddress(e), } } @@ -2871,13 +2879,13 @@ impl WithdrawError { available: BigDecimal::from(0), required, } - }, + } GenerateTxError::EmptyOutputs => WithdrawError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { let amount = big_decimal_from_sat_unsigned(value, decimals); let threshold = big_decimal_from_sat_unsigned(dust, decimals); WithdrawError::AmountTooLow { amount, threshold } - }, + } GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -2888,7 +2896,7 @@ impl WithdrawError { available, required, } - }, + } GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2897,7 +2905,7 @@ impl WithdrawError { available, required, } - }, + } GenerateTxError::Transport(e) => WithdrawError::Transport(e), GenerateTxError::Internal(e) => WithdrawError::InternalError(e), } @@ -2968,10 +2976,10 @@ impl From for VerificationError { match e { FromBase58Error::InvalidBase58Character(c, _) => { VerificationError::AddressDecodingError(format!("Invalid Base58 Character: {}", c)) - }, + } FromBase58Error::InvalidBase58Length => { VerificationError::AddressDecodingError(String::from("Invalid Base58 Length")) - }, + } } } } @@ -2979,7 +2987,7 @@ impl From for VerificationError { /// NB: Implementations are expected to follow the pImpl idiom, providing cheap reference-counted cloning and garbage collection. #[async_trait] pub trait MmCoin: - SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + Send + Sync + 'static +SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + Send + Sync + 'static { // `MmCoin` is an extension fulcrum for something that doesn't fit the `MarketCoinOps`. Practical examples: // name (might be required for some APIs, CoinMarketCap for instance); @@ -3017,7 +3025,7 @@ pub trait MmCoin: fn validate_address(&self, address: &str) -> ValidateAddressResult; /// Loop collecting coin transaction history and saving it to local DB - fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; + fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; fn account_db_id(&self) -> Option { None } @@ -3067,7 +3075,7 @@ pub trait MmCoin: fn history_sync_status(&self) -> HistorySyncState; /// Get fee to be paid per 1 swap transaction - fn get_trade_fee(&self) -> Box + Send>; + fn get_trade_fee(&self) -> Box + Send>; /// Get fee to be paid by sender per whole swap using the sending value and check if the wallet has sufficient balance to pay the fee. 
async fn get_sender_trade_fee( @@ -3148,8 +3156,8 @@ impl CoinFutSpawner { impl SpawnFuture for CoinFutSpawner { fn spawn(&self, f: F) - where - F: Future03 + Send + 'static, + where + F: Future03 + Send + 'static, { self.inner.spawn(f) } @@ -3157,8 +3165,8 @@ impl SpawnFuture for CoinFutSpawner { impl SpawnAbortable for CoinFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future03 + Send + 'static, + where + F: Future03 + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -3177,17 +3185,17 @@ pub enum MmCoinEnum { Tendermint(TendermintCoin), TendermintToken(TendermintToken), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] SolanaCoin(SolanaCoin), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] SplToken(SplToken), #[cfg(not(target_arch = "wasm32"))] @@ -3208,20 +3216,20 @@ impl From for MmCoinEnum { } #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") +feature = "enable-solana", +not(target_os = "ios"), +not(target_os = "android"), +not(target_arch = "wasm32") ))] impl From for MmCoinEnum { fn from(c: SolanaCoin) -> MmCoinEnum { MmCoinEnum::SolanaCoin(c) } } #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") +feature = "enable-solana", +not(target_os = "ios"), +not(target_os = "android"), +not(target_arch = "wasm32") ))] impl From for MmCoinEnum { fn from(c: SplToken) -> MmCoinEnum { MmCoinEnum::SplToken(c) } @@ -3278,17 +3286,17 @@ impl Deref for MmCoinEnum { MmCoinEnum::ZCoin(ref c) => c, MmCoinEnum::Test(ref c) => c, #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] MmCoinEnum::SolanaCoin(ref c) => c, #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] MmCoinEnum::SplToken(ref c) => c, } @@ -3464,7 +3472,7 @@ impl CoinsContext { #[cfg(target_arch = "wasm32")] tx_history_db: ConstructibleDb::new(ctx, None).into_shared(), #[cfg(target_arch = "wasm32")] - hd_wallet_db: ConstructibleDb::new_shared_db(ctx, None).into_shared(), + hd_wallet_db: ConstructibleDb::new_shared_db(ctx,).into_shared(), }) }))) } @@ -4017,14 +4025,14 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { let params = try_s!(UtxoActivationParams::from_legacy_req(req)); try_s!(utxo_standard_coin_with_policy(ctx, ticker, &coins_en, ¶ms, priv_key_policy).await).into() - }, + } CoinProtocol::QTUM => { let params = try_s!(UtxoActivationParams::from_legacy_req(req)); try_s!(qtum_coin_with_policy(ctx, ticker, &coins_en, ¶ms, priv_key_policy).await).into() - }, + } CoinProtocol::ETH | CoinProtocol::ERC20 { .. 
} => { try_s!(eth_coin_from_conf_and_request(ctx, ticker, &coins_en, req, protocol, priv_key_policy).await).into() - }, + } CoinProtocol::QRC20 { platform, contract_address, @@ -4044,15 +4052,15 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { let prefix = try_s!(CashAddrPrefix::from_str(slp_prefix)); let params = try_s!(BchActivationRequest::from_legacy_req(req)); let bch = try_s!(bch_coin_with_policy(ctx, ticker, &coins_en, params, prefix, priv_key_policy).await); bch.into() - }, + } CoinProtocol::SLPTOKEN { platform, token_id, @@ -4075,7 +4083,7 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result return ERR!("TENDERMINT protocol is not supported by lp_coininit"), CoinProtocol::TENDERMINTTOKEN(_) => return ERR!("TENDERMINTTOKEN protocol is not supported by lp_coininit"), CoinProtocol::ZHTLC { .. } => return ERR!("ZHTLC protocol is not supported by lp_coininit"), @@ -4085,11 +4093,11 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { return ERR!("Solana protocol is not supported by lp_coininit - use enable_solana_with_tokens instead"); - }, + } #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SPLTOKEN { .. } => { return ERR!("SplToken protocol is not supported by lp_coininit - use enable_spl instead"); - }, + } }; let register_params = RegisterCoinParams { @@ -4133,7 +4141,7 @@ pub async fn lp_register_coin( match coins.raw_entry_mut().from_key(&ticker) { RawEntryMut::Occupied(_oe) => { return MmError::err(RegisterCoinError::CoinIsInitializedAlready { coin: ticker.clone() }); - }, + } RawEntryMut::Vacant(ve) => ve.insert(ticker.clone(), MmCoinStruct::new(coin.clone())), }; @@ -4347,7 +4355,7 @@ pub async fn remove_delegation(ctx: MmArc, req: RemoveDelegateRequest) -> Delega return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), }); - }, + } } } @@ -4359,7 +4367,7 @@ pub async fn get_staking_infos(ctx: MmArc, req: GetStakingInfosRequest) -> Staki return MmError::err(StakingInfosError::CoinDoesntSupportStakingInfos { coin: coin.ticker().to_string(), }); - }, + } } } @@ -4372,7 +4380,7 @@ pub async fn add_delegation(ctx: MmArc, req: AddDelegateRequest) -> DelegationRe return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), }); - }, + } }; match req.staking_details { StakingDetails::Qtum(qtum_staking) => coin_concrete.add_delegation(qtum_staking).compat().await, @@ -4436,7 +4444,7 @@ pub async fn my_tx_history(ctx: MmArc, req: Json) -> Result>, S .position(|item| item.internal_id == *id) .ok_or(format!("from_id {:02x} is not found", id))) + 1 - }, + } None => match request.page_number { Some(page_n) => (page_n.get() - 1) * request.limit, None => 0, @@ -4612,7 +4620,7 @@ pub fn update_coins_config(mut config: Json) -> Result { contract_address, } } - }, + } _ => CoinProtocol::UTXO, }; @@ -4661,7 +4669,7 @@ pub fn address_by_coin_conf_and_pubkey_str( CoinProtocol::ERC20 { .. } | CoinProtocol::ETH | CoinProtocol::NFT { .. } => eth::addr_from_pubkey_str(pubkey), CoinProtocol::UTXO | CoinProtocol::QTUM | CoinProtocol::QRC20 { .. } | CoinProtocol::BCH { .. } => { utxo::address_by_conf_and_pubkey_str(coin, conf, pubkey, addr_format) - }, + } CoinProtocol::SLPTOKEN { platform, .. 
} => { let platform_conf = coin_conf(ctx, &platform); if platform_conf.is_null() { @@ -4672,10 +4680,10 @@ pub fn address_by_coin_conf_and_pubkey_str( match platform_protocol { CoinProtocol::BCH { slp_prefix } => { slp_addr_from_pubkey_str(pubkey, &slp_prefix).map_err(|e| ERRL!("{}", e)) - }, + } _ => ERR!("Platform protocol {:?} is not BCH", platform_protocol), } - }, + } CoinProtocol::TENDERMINT(protocol) => tendermint::account_id_from_pubkey_hex(&protocol.account_prefix, pubkey) .map(|id| id.to_string()) .map_err(|e| e.to_string()), @@ -4691,26 +4699,26 @@ pub fn address_by_coin_conf_and_pubkey_str( tendermint::account_id_from_pubkey_hex(&platform.account_prefix, pubkey) .map(|id| id.to_string()) .map_err(|e| e.to_string()) - }, + } _ => ERR!("Platform protocol {:?} is not TENDERMINT", platform_protocol), } - }, + } #[cfg(not(target_arch = "wasm32"))] CoinProtocol::LIGHTNING { .. } => { ERR!("address_by_coin_conf_and_pubkey_str is not implemented for lightning protocol yet!") - }, + } #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA | CoinProtocol::SPLTOKEN { .. } => { ERR!("Solana pubkey is the public address - you do not need to use this rpc call.") - }, + } CoinProtocol::ZHTLC { .. } => ERR!("address_by_coin_conf_and_pubkey_str is not supported for ZHTLC protocol!"), } } #[cfg(target_arch = "wasm32")] fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut> -where - T: MmCoin + ?Sized, + where + T: MmCoin + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); @@ -4742,8 +4750,8 @@ where #[cfg(not(target_arch = "wasm32"))] fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut> -where - T: MmCoin + ?Sized, + where + T: MmCoin + ?Sized, { let ticker = coin.ticker().to_owned(); let history_path = coin.tx_history_path(ctx); @@ -4754,7 +4762,7 @@ where Ok(content) => content, Err(err) if err.kind() == io::ErrorKind::NotFound => { return Ok(Vec::new()); - }, + } Err(err) => { let error = format!( "Error '{}' reading from the history file {}", @@ -4762,7 +4770,7 @@ where history_path.display() ); return MmError::err(TxHistoryError::ErrorLoading(error)); - }, + } }; let serde_err = match json::from_slice(&content) { Ok(txs) => return Ok(txs), @@ -4784,8 +4792,8 @@ where #[cfg(target_arch = "wasm32")] fn save_history_to_file_impl(coin: &T, ctx: &MmArc, mut history: Vec) -> TxHistoryFut<()> -where - T: MmCoin + MarketCoinOps + ?Sized, + where + T: MmCoin + MarketCoinOps + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); @@ -4805,8 +4813,8 @@ where #[cfg(not(target_arch = "wasm32"))] fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut -where - T: MmCoin + MarketCoinOps + ?Sized, + where + T: MmCoin + MarketCoinOps + ?Sized, { let migration_path = coin.tx_migration_path(ctx); @@ -4820,7 +4828,7 @@ where } else { 0 } - }, + } Err(_) => 0, }; @@ -4832,8 +4840,8 @@ where #[cfg(not(target_arch = "wasm32"))] fn update_migration_file_impl(coin: &T, ctx: &MmArc, migration_number: u64) -> TxHistoryFut<()> -where - T: MmCoin + MarketCoinOps + ?Sized, + where + T: MmCoin + MarketCoinOps + ?Sized, { let migration_path = coin.tx_migration_path(ctx); let tmp_file = format!("{}.tmp", migration_path.display()); @@ -4860,8 +4868,8 @@ where #[cfg(not(target_arch = "wasm32"))] fn save_history_to_file_impl(coin: &T, ctx: &MmArc, mut history: Vec) -> TxHistoryFut<()> -where - T: MmCoin + MarketCoinOps + ?Sized, + where + T: MmCoin + MarketCoinOps + ?Sized, { let history_path = 
coin.tx_history_path(ctx); let tmp_file = format!("{}.tmp", history_path.display()); @@ -4905,8 +4913,8 @@ impl TxIdHeight { } pub(crate) fn compare_transactions(a: TxIdHeight, b: TxIdHeight) -> Ordering -where - Id: Ord, + where + Id: Ord, { // the transactions with block_height == 0 are the most recent so we need to separately handle them while sorting if a.block_height == b.block_height { @@ -4946,7 +4954,7 @@ pub async fn get_my_address(ctx: MmArc, req: MyAddressReq) -> MmResult ConstructibleDb { /// Creates a new uninitialized `Db` instance shared between Iguana and all HD accounts /// derived from the same passphrase. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. - pub fn new_shared_db(ctx: &MmArc, db_id: Option<&str>) -> Self { - let rmd = hex::encode(ctx.shared_db_id().as_slice()); - let db_id = db_id.unwrap_or(&rmd); + pub fn new_shared_db(ctx: &MmArc) -> Self { + let db_id = hex::encode(ctx.shared_db_id().as_slice()); ConstructibleDb { mutex: AsyncMutex::new(None), db_namespace: ctx.db_namespace, db_id: AsyncMutex::new(Some(db_id.to_string())), - default_db_id: rmd, + default_db_id: db_id, } } diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index 5cc2395e71..7f251a1337 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -32,10 +32,10 @@ impl From for AccountStorageError { DbTransactionError::ErrorSerializingItem(_) => AccountStorageError::ErrorSerializing(desc), DbTransactionError::ErrorGettingItems(_) | DbTransactionError::ErrorCountingItems(_) => { AccountStorageError::ErrorLoading(desc) - }, + } DbTransactionError::ErrorUploadingItem(_) | DbTransactionError::ErrorDeletingItems(_) => { AccountStorageError::ErrorSaving(desc) - }, + } } } } @@ -63,7 +63,7 @@ pub(crate) struct WasmAccountStorage { impl WasmAccountStorage { pub fn new(ctx: &MmArc) -> Self { WasmAccountStorage { - account_db: ConstructibleDb::new_shared_db(ctx, None).into_shared(), + account_db: ConstructibleDb::new_shared_db(ctx).into_shared(), } } @@ -166,8 +166,8 @@ impl WasmAccountStorage { /// Loads an account by `AccountId`, applies the given `f` function to it, /// and uploads changes to the storage. 
async fn update_account(&self, account_id: AccountId, f: F) -> AccountStorageResult<()> - where - F: FnOnce(&mut AccountTable), + where + F: FnOnce(&mut AccountTable), { let locked_db = self.lock_db_mutex().await?; let transaction = locked_db.inner.transaction().await?; @@ -331,7 +331,7 @@ impl AccountStorage for WasmAccountStorage { account.activated_coins.remove(ticker); } }) - .await + .await } } From 4e7ae81b54f455aefdf43f61d9ec84b7b8ed4492 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 24 Apr 2024 05:33:48 +0100 Subject: [PATCH 070/186] clippy & fmt --- mm2src/coins/lp_coins.rs | 404 +++++++++--------- .../src/account/storage/wasm_storage.rs | 10 +- mm2src/mm2_main/src/lp_ordermatch.rs | 285 ++++++------ 3 files changed, 344 insertions(+), 355 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 599197976b..06199e9a23 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -21,10 +21,10 @@ // `mockable` implementation uses these #![allow( -clippy::forget_ref, -clippy::forget_copy, -clippy::swap_ptr_to_ref, -clippy::forget_non_drop + clippy::forget_ref, + clippy::forget_copy, + clippy::swap_ptr_to_ref, + clippy::forget_non_drop )] #![allow(uncommon_codepoints)] #![feature(integer_atomics)] @@ -33,20 +33,13 @@ clippy::forget_non_drop #![feature(stmt_expr_attributes)] #![feature(result_flattening)] -#[macro_use] -extern crate common; -#[macro_use] -extern crate gstuff; -#[macro_use] -extern crate lazy_static; -#[macro_use] -extern crate mm2_metrics; -#[macro_use] -extern crate serde_derive; -#[macro_use] -extern crate serde_json; -#[macro_use] -extern crate ser_error_derive; +#[macro_use] extern crate common; +#[macro_use] extern crate gstuff; +#[macro_use] extern crate lazy_static; +#[macro_use] extern crate mm2_metrics; +#[macro_use] extern crate serde_derive; +#[macro_use] extern crate serde_json; +#[macro_use] extern crate ser_error_derive; use async_trait::async_trait; use base58::FromBase58Error; @@ -147,7 +140,7 @@ macro_rules! try_tx_fus { TransactionEnum::from($tx), ERRL!("{:?}", err), ))); - } + }, } }; } @@ -164,7 +157,7 @@ macro_rules! try_tx_s { line!(), err ))); - } + }, } }; ($e: expr, $tx: expr) => { @@ -175,7 +168,7 @@ macro_rules! try_tx_s { TransactionEnum::from($tx), format!("{}:{}] {:?}", file!(), line!(), err), )); - } + }, } }; } @@ -205,7 +198,7 @@ macro_rules! 
ok_or_continue_after_sleep { error!("error {:?}", e); Timer::sleep($delay).await; continue; - } + }, } }; } @@ -220,8 +213,7 @@ pub mod hd_confirm_address; pub mod hd_pubkey; pub mod hd_wallet; pub mod hd_wallet_storage; -#[cfg(not(target_arch = "wasm32"))] -pub mod lightning; +#[cfg(not(target_arch = "wasm32"))] pub mod lightning; pub mod lp_price; #[cfg_attr(target_arch = "wasm32", allow(dead_code, unused_imports))] pub mod my_tx_history_v2; @@ -231,10 +223,10 @@ pub mod rpc_command; #[doc(hidden)] #[allow(unused_variables)] #[cfg(all( -feature = "enable-solana", -not(target_os = "ios"), -not(target_os = "android"), -not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] pub mod solana; pub mod tendermint; @@ -265,17 +257,17 @@ use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskM init_withdraw::{WithdrawTaskManager, WithdrawTaskManagerShared}}; use script::Script; #[cfg(all( -feature = "enable-solana", -not(target_os = "ios"), -not(target_os = "android"), -not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] pub use solana::spl::SplToken; #[cfg(all( -feature = "enable-solana", -not(target_os = "ios"), -not(target_os = "android"), -not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] pub use solana::{SolanaActivationParams, SolanaCoin, SolanaFeeDetails}; use tendermint::htlc::CustomTendermintMsgType; @@ -294,26 +286,26 @@ use utxo::UtxoActivationParams; use utxo::{BlockchainNetwork, GenerateTxError, UtxoFeeDetails, UtxoTx}; use z_coin::{ZCoin, ZcoinProtocolInfo}; -pub type TransactionFut = Box + Send>; +pub type TransactionFut = Box + Send>; pub type TransactionResult = Result; pub type BalanceResult = Result>; -pub type BalanceFut = Box> + Send>; -pub type NonZeroBalanceFut = Box> + Send>; +pub type BalanceFut = Box> + Send>; +pub type NonZeroBalanceFut = Box> + Send>; pub type NumConversResult = Result>; pub type StakingInfosResult = Result>; -pub type StakingInfosFut = Box> + Send>; +pub type StakingInfosFut = Box> + Send>; pub type DelegationResult = Result>; -pub type DelegationFut = Box> + Send>; +pub type DelegationFut = Box> + Send>; pub type WithdrawResult = Result>; -pub type WithdrawFut = Box> + Send>; +pub type WithdrawFut = Box> + Send>; pub type TradePreimageResult = Result>; -pub type TradePreimageFut = Box> + Send>; +pub type TradePreimageFut = Box> + Send>; pub type CoinFindResult = Result>; -pub type TxHistoryFut = Box> + Send>; +pub type TxHistoryFut = Box> + Send>; pub type TxHistoryResult = Result>; pub type RawTransactionResult = Result>; pub type RawTransactionFut<'a> = -Box> + Send + 'a>; + Box> + Send + 'a>; pub type RefundResult = Result>; /// Helper type used for swap transactions' spend preimage generation result pub type GenPreimageResult = MmResult, TxGenError>; @@ -371,7 +363,7 @@ impl HttpStatusCode for RawTransactionError { match self { RawTransactionError::InternalError(_) | RawTransactionError::SigningError(_) => { StatusCode::INTERNAL_SERVER_ERROR - } + }, RawTransactionError::NoSuchCoin { .. 
} | RawTransactionError::InvalidHashError(_) | RawTransactionError::HashNotExist(_) @@ -420,7 +412,7 @@ impl HttpStatusCode for GetMyAddressError { | GetMyAddressError::InvalidRequest(_) => StatusCode::BAD_REQUEST, GetMyAddressError::Internal(_) | GetMyAddressError::GetEthAddressError(_) => { StatusCode::INTERNAL_SERVER_ERROR - } + }, } } } @@ -537,8 +529,8 @@ pub enum PrivKeyPolicyNotAllowed { impl Serialize for PrivKeyPolicyNotAllowed { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, + where + S: Serializer, { serializer.serialize_str(&self.to_string()) } @@ -837,10 +829,10 @@ impl<'a> SwapTxTypeWithSecretHash<'a> { match self { SwapTxTypeWithSecretHash::TakerOrMakerPayment { maker_secret_hash } => { payment_script(time_lock, maker_secret_hash, my_public, other_public) - } + }, SwapTxTypeWithSecretHash::TakerFunding { taker_secret_hash } => { swap_proto_v2_scripts::taker_funding_script(time_lock, taker_secret_hash, my_public, other_public) - } + }, SwapTxTypeWithSecretHash::MakerPaymentV2 { maker_secret_hash, taker_secret_hash, @@ -853,7 +845,7 @@ impl<'a> SwapTxTypeWithSecretHash<'a> { ), SwapTxTypeWithSecretHash::TakerPaymentV2 { maker_secret_hash } => { swap_proto_v2_scripts::taker_payment_script(time_lock, maker_secret_hash, my_public, other_public) - } + }, } } @@ -1058,7 +1050,7 @@ pub trait SwapOps { fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, - ) -> Box, Error=String> + Send>; + ) -> Box, Error = String> + Send>; async fn search_for_swap_tx_spend_my( &self, @@ -1082,7 +1074,7 @@ pub trait SwapOps { /// Whether the refund transaction can be sent now /// For example: there are no additional conditions for ETH, but for some UTXO coins we should wait for /// locktime < MTP - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { + fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { let now = now_sec(); let result = if now > locktime { CanRefundHtlc::CanRefundNow @@ -1671,9 +1663,9 @@ pub trait MakerNftSwapOpsV2: ParseCoinAssocTypes + ParseNftAssocTypes + Send + S pub enum WaitForTakerPaymentSpendError { /// Timeout error variant, indicating that the wait for taker payment spend has timed out. #[display( - fmt = "Timed out waiting for taker payment spend, wait_until {}, now {}", - wait_until, - now + fmt = "Timed out waiting for taker payment spend, wait_until {}, now {}", + wait_until, + now )] Timeout { /// The timestamp until which the wait was expected to complete. 
@@ -1691,10 +1683,10 @@ impl From for WaitForTakerPaymentSpendError { match err { WaitForOutputSpendErr::Timeout { wait_until, now } => { WaitForTakerPaymentSpendError::Timeout { wait_until, now } - } + }, WaitForOutputSpendErr::NoOutputWithIndex(index) => { WaitForTakerPaymentSpendError::InvalidInputTx(format!("Tx doesn't have output with index {}", index)) - } + }, } } } @@ -1722,13 +1714,13 @@ impl fmt::Debug for FundingTxSpend { match self { FundingTxSpend::RefundedTimelock(tx) => { write!(f, "RefundedTimelock({:?})", tx) - } + }, FundingTxSpend::RefundedSecret { tx, secret: _ } => { write!(f, "RefundedSecret {{ tx: {:?} }}", tx) - } + }, FundingTxSpend::TransferredToTakerPayment(tx) => { write!(f, "TransferredToTakerPayment({:?})", tx) - } + }, } } } @@ -1872,21 +1864,21 @@ pub trait MarketCoinOps { fn platform_ticker(&self) -> &str; /// Receives raw transaction bytes in hexadecimal format as input and returns tx hash in hexadecimal format - fn send_raw_tx(&self, tx: &str) -> Box + Send>; + fn send_raw_tx(&self, tx: &str) -> Box + Send>; /// Receives raw transaction bytes as input and returns tx hash in hexadecimal format - fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send>; + fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send>; /// Signs raw utxo transaction in hexadecimal format as input and returns signed transaction in hexadecimal format async fn sign_raw_tx(&self, args: &SignRawTransactionRequest) -> RawTransactionResult; - fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send>; + fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send>; fn wait_for_htlc_tx_spend(&self, args: WaitForHTLCTxSpendArgs<'_>) -> TransactionFut; fn tx_enum_from_bytes(&self, bytes: &[u8]) -> Result>; - fn current_block(&self) -> Box + Send>; + fn current_block(&self) -> Box + Send>; fn display_priv_key(&self) -> Result; @@ -2072,10 +2064,10 @@ pub enum TxFeeDetails { Slp(SlpFeeDetails), Tendermint(TendermintFeeDetails), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] Solana(SolanaFeeDetails), } @@ -2083,8 +2075,8 @@ pub enum TxFeeDetails { /// Deserialize the TxFeeDetails as an untagged enum. 
impl<'de> Deserialize<'de> for TxFeeDetails { fn deserialize(deserializer: D) -> Result>::Error> - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { #[derive(Deserialize)] #[serde(untagged)] @@ -2093,10 +2085,10 @@ impl<'de> Deserialize<'de> for TxFeeDetails { Eth(EthTxFeeDetails), Qrc20(Qrc20FeeDetails), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] Solana(SolanaFeeDetails), Tendermint(TendermintFeeDetails), @@ -2107,10 +2099,10 @@ impl<'de> Deserialize<'de> for TxFeeDetails { TxFeeDetailsUnTagged::Eth(f) => Ok(TxFeeDetails::Eth(f)), TxFeeDetailsUnTagged::Qrc20(f) => Ok(TxFeeDetails::Qrc20(f)), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] TxFeeDetailsUnTagged::Solana(f) => Ok(TxFeeDetails::Solana(f)), TxFeeDetailsUnTagged::Tendermint(f) => Ok(TxFeeDetails::Tendermint(f)), @@ -2131,10 +2123,10 @@ impl From for TxFeeDetails { } #[cfg(all( -feature = "enable-solana", -not(target_os = "ios"), -not(target_os = "android"), -not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] impl From for TxFeeDetails { fn from(solana_details: SolanaFeeDetails) -> Self { TxFeeDetails::Solana(solana_details) } @@ -2313,10 +2305,10 @@ pub enum TradePreimageValue { #[derive(Debug, Display, EnumFromStringify, PartialEq)] pub enum TradePreimageError { #[display( - fmt = "Not enough {} to preimage the trade: available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to preimage the trade: available {}, required at least {}", + coin, + available, + required )] NotSufficientBalance { coin: String, @@ -2350,7 +2342,7 @@ impl TradePreimageError { available: BigDecimal::from(0), required, } - } + }, GenerateTxError::EmptyOutputs => TradePreimageError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { if is_upper_bound { @@ -2374,7 +2366,7 @@ impl TradePreimageError { let threshold = big_decimal_from_sat_unsigned(dust, decimals); TradePreimageError::AmountIsTooSmall { amount, threshold } } - } + }, GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -2385,7 +2377,7 @@ impl TradePreimageError { available, required, } - } + }, GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2394,7 +2386,7 @@ impl TradePreimageError { available, required, } - } + }, GenerateTxError::Transport(e) => TradePreimageError::Transport(e), GenerateTxError::Internal(e) => TradePreimageError::InternalError(e), } @@ -2467,7 +2459,7 @@ impl From for StakingInfosError { match e { UtxoRpcError::Transport(rpc) | UtxoRpcError::ResponseParseError(rpc) => { StakingInfosError::Transport(rpc.to_string()) - } + }, UtxoRpcError::InvalidResponse(error) => StakingInfosError::Transport(error), UtxoRpcError::Internal(error) => StakingInfosError::Internal(error), } @@ -2480,7 +2472,7 @@ impl From for StakingInfosError { Qrc20AddressError::UnexpectedDerivationMethod(e) => StakingInfosError::UnexpectedDerivationMethod(e), Qrc20AddressError::ScriptHashTypeNotSupported { script_hash_type } => { StakingInfosError::Internal(format!("Script hash type '{}' is not supported", script_hash_type)) - } + }, } } } @@ -2509,10 +2501,10 @@ impl From for StakingInfosError { #[serde(tag = "error_type", content = "error_data")] pub enum DelegationError { #[display( - fmt = "Not enough {} to delegate: available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to delegate: available {}, required at least {}", + coin, + available, + required )] NotSufficientBalance { coin: String, @@ -2546,7 +2538,7 @@ impl From for DelegationError { match e { UtxoRpcError::Transport(transport) | UtxoRpcError::ResponseParseError(transport) => { DelegationError::Transport(transport.to_string()) - } + }, UtxoRpcError::InvalidResponse(resp) => DelegationError::Transport(resp), UtxoRpcError::Internal(internal) => DelegationError::InternalError(internal), } @@ -2558,12 +2550,12 @@ impl From for DelegationError { match e { StakingInfosError::CoinDoesntSupportStakingInfos { coin } => { DelegationError::CoinDoesntSupportDelegation { coin } - } + }, StakingInfosError::NoSuchCoin { coin } => DelegationError::NoSuchCoin { coin }, StakingInfosError::Transport(e) => DelegationError::Transport(e), StakingInfosError::UnexpectedDerivationMethod(reason) => { DelegationError::DelegationOpsNotSupported { reason } - } + }, StakingInfosError::Internal(e) => DelegationError::InternalError(e), } } @@ -2583,7 +2575,7 @@ impl From for DelegationError { BalanceError::Transport(error) | BalanceError::InvalidResponse(error) => DelegationError::Transport(error), BalanceError::UnexpectedDerivationMethod(e) => { DelegationError::DelegationOpsNotSupported { reason: e.to_string() } - } + }, e @ BalanceError::WalletStorageError(_) => DelegationError::InternalError(e.to_string()), BalanceError::Internal(internal) => DelegationError::InternalError(internal), } @@ -2627,13 +2619,13 @@ impl DelegationError { available: BigDecimal::from(0), required, } - } + }, GenerateTxError::EmptyOutputs => DelegationError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { let amount = big_decimal_from_sat_unsigned(value, decimals); let threshold = big_decimal_from_sat_unsigned(dust, decimals); DelegationError::AmountTooLow { amount, threshold } - } + }, GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -2644,7 +2636,7 @@ impl DelegationError { available, required, } - } + }, GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2653,7 +2645,7 @@ impl DelegationError { available, required, } - } + }, GenerateTxError::Transport(e) => DelegationError::Transport(e), GenerateTxError::Internal(e) => DelegationError::InternalError(e), } @@ -2664,17 +2656,17 @@ impl DelegationError { #[serde(tag = "error_type", content = "error_data")] pub enum WithdrawError { #[display( - fmt = "'{}' coin doesn't support 'init_withdraw' yet. Consider using 'withdraw' request instead", - coin + fmt = "'{}' coin doesn't support 'init_withdraw' yet. Consider using 'withdraw' request instead", + coin )] CoinDoesntSupportInitWithdraw { coin: String, }, #[display( - fmt = "Not enough {} to withdraw: available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to withdraw: available {}, required at least {}", + coin, + available, + required )] NotSufficientBalance { coin: String, @@ -2682,10 +2674,10 @@ pub enum WithdrawError { required: BigDecimal, }, #[display( - fmt = "Not enough {} to afford fee. Available {}, required at least {}", - coin, - available, - required + fmt = "Not enough {} to afford fee. Available {}, required at least {}", + coin, + available, + required )] NotSufficientPlatformBalanceForFee { coin: String, @@ -2732,10 +2724,10 @@ pub enum WithdrawError { Transport(String), #[from_trait(WithInternal::internal)] #[from_stringify( - "MyAddressError", - "NumConversError", - "UnexpectedDerivationMethod", - "PrivKeyPolicyNotAllowed" + "MyAddressError", + "NumConversError", + "UnexpectedDerivationMethod", + "PrivKeyPolicyNotAllowed" )] #[display(fmt = "Internal error: {}", _0)] InternalError(String), @@ -2751,11 +2743,11 @@ pub enum WithdrawError { ActionNotAllowed(String), GetNftInfoError(GetNftInfoError), #[display( - fmt = "Not enough NFTs amount with token_address: {} and token_id {}. Available {}, required {}", - token_address, - token_id, - available, - required + fmt = "Not enough NFTs amount with token_address: {} and token_id {}. Available {}, required {}", + token_address, + token_id, + available, + required )] NotEnoughNftsAmount { token_address: String, @@ -2803,7 +2795,7 @@ impl HttpStatusCode for WithdrawError { WithdrawError::BroadcastExpected(_) => StatusCode::BAD_REQUEST, WithdrawError::InternalError(_) | WithdrawError::DbError(_) | WithdrawError::NftProtocolNotSupported => { StatusCode::INTERNAL_SERVER_ERROR - } + }, WithdrawError::Transport(_) => StatusCode::BAD_GATEWAY, } } @@ -2844,7 +2836,7 @@ impl From for WithdrawError { match e { GetValidEthWithdrawAddError::CoinDoesntSupportNftWithdraw { coin } => { WithdrawError::CoinDoesntSupportNftWithdraw { coin } - } + }, GetValidEthWithdrawAddError::InvalidAddress(e) => WithdrawError::InvalidAddress(e), } } @@ -2879,13 +2871,13 @@ impl WithdrawError { available: BigDecimal::from(0), required, } - } + }, GenerateTxError::EmptyOutputs => WithdrawError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { let amount = big_decimal_from_sat_unsigned(value, decimals); let threshold = big_decimal_from_sat_unsigned(dust, decimals); WithdrawError::AmountTooLow { amount, threshold } - } + }, GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -2896,7 +2888,7 @@ impl WithdrawError { available, required, } - } + }, GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2905,7 +2897,7 @@ impl WithdrawError { available, required, } - } + }, GenerateTxError::Transport(e) => WithdrawError::Transport(e), GenerateTxError::Internal(e) => WithdrawError::InternalError(e), } @@ -2976,10 +2968,10 @@ impl From for VerificationError { match e { FromBase58Error::InvalidBase58Character(c, _) => { VerificationError::AddressDecodingError(format!("Invalid Base58 Character: {}", c)) - } + }, FromBase58Error::InvalidBase58Length => { VerificationError::AddressDecodingError(String::from("Invalid Base58 Length")) - } + }, } } } @@ -2987,7 +2979,7 @@ impl From for VerificationError { /// NB: Implementations are expected to follow the pImpl idiom, providing cheap reference-counted cloning and garbage collection. #[async_trait] pub trait MmCoin: -SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + Send + Sync + 'static + SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + Send + Sync + 'static { // `MmCoin` is an extension fulcrum for something that doesn't fit the `MarketCoinOps`. Practical examples: // name (might be required for some APIs, CoinMarketCap for instance); @@ -3025,7 +3017,7 @@ SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + fn validate_address(&self, address: &str) -> ValidateAddressResult; /// Loop collecting coin transaction history and saving it to local DB - fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; + fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; fn account_db_id(&self) -> Option { None } @@ -3075,7 +3067,7 @@ SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + fn history_sync_status(&self) -> HistorySyncState; /// Get fee to be paid per 1 swap transaction - fn get_trade_fee(&self) -> Box + Send>; + fn get_trade_fee(&self) -> Box + Send>; /// Get fee to be paid by sender per whole swap using the sending value and check if the wallet has sufficient balance to pay the fee. 
async fn get_sender_trade_fee( @@ -3156,8 +3148,8 @@ impl CoinFutSpawner { impl SpawnFuture for CoinFutSpawner { fn spawn(&self, f: F) - where - F: Future03 + Send + 'static, + where + F: Future03 + Send + 'static, { self.inner.spawn(f) } @@ -3165,8 +3157,8 @@ impl SpawnFuture for CoinFutSpawner { impl SpawnAbortable for CoinFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future03 + Send + 'static, + where + F: Future03 + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -3185,17 +3177,17 @@ pub enum MmCoinEnum { Tendermint(TendermintCoin), TendermintToken(TendermintToken), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] SolanaCoin(SolanaCoin), #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] SplToken(SplToken), #[cfg(not(target_arch = "wasm32"))] @@ -3216,20 +3208,20 @@ impl From for MmCoinEnum { } #[cfg(all( -feature = "enable-solana", -not(target_os = "ios"), -not(target_os = "android"), -not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] impl From for MmCoinEnum { fn from(c: SolanaCoin) -> MmCoinEnum { MmCoinEnum::SolanaCoin(c) } } #[cfg(all( -feature = "enable-solana", -not(target_os = "ios"), -not(target_os = "android"), -not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] impl From for MmCoinEnum { fn from(c: SplToken) -> MmCoinEnum { MmCoinEnum::SplToken(c) } @@ -3286,17 +3278,17 @@ impl Deref for MmCoinEnum { MmCoinEnum::ZCoin(ref c) => c, MmCoinEnum::Test(ref c) => c, #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] MmCoinEnum::SolanaCoin(ref c) => c, #[cfg(all( - feature = "enable-solana", - not(target_os = "ios"), - not(target_os = "android"), - not(target_arch = "wasm32") + feature = "enable-solana", + not(target_os = "ios"), + not(target_os = "android"), + not(target_arch = "wasm32") ))] MmCoinEnum::SplToken(ref c) => c, } @@ -3472,7 +3464,7 @@ impl CoinsContext { #[cfg(target_arch = "wasm32")] tx_history_db: ConstructibleDb::new(ctx, None).into_shared(), #[cfg(target_arch = "wasm32")] - hd_wallet_db: ConstructibleDb::new_shared_db(ctx,).into_shared(), + hd_wallet_db: ConstructibleDb::new_shared_db(ctx).into_shared(), }) }))) } @@ -4025,14 +4017,14 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { let params = try_s!(UtxoActivationParams::from_legacy_req(req)); try_s!(utxo_standard_coin_with_policy(ctx, ticker, &coins_en, ¶ms, priv_key_policy).await).into() - } + }, CoinProtocol::QTUM => { let params = try_s!(UtxoActivationParams::from_legacy_req(req)); try_s!(qtum_coin_with_policy(ctx, ticker, &coins_en, ¶ms, priv_key_policy).await).into() - } + }, CoinProtocol::ETH | CoinProtocol::ERC20 { .. 
} => { try_s!(eth_coin_from_conf_and_request(ctx, ticker, &coins_en, req, protocol, priv_key_policy).await).into() - } + }, CoinProtocol::QRC20 { platform, contract_address, @@ -4052,15 +4044,15 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { let prefix = try_s!(CashAddrPrefix::from_str(slp_prefix)); let params = try_s!(BchActivationRequest::from_legacy_req(req)); let bch = try_s!(bch_coin_with_policy(ctx, ticker, &coins_en, params, prefix, priv_key_policy).await); bch.into() - } + }, CoinProtocol::SLPTOKEN { platform, token_id, @@ -4083,7 +4075,7 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result return ERR!("TENDERMINT protocol is not supported by lp_coininit"), CoinProtocol::TENDERMINTTOKEN(_) => return ERR!("TENDERMINTTOKEN protocol is not supported by lp_coininit"), CoinProtocol::ZHTLC { .. } => return ERR!("ZHTLC protocol is not supported by lp_coininit"), @@ -4093,11 +4085,11 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { return ERR!("Solana protocol is not supported by lp_coininit - use enable_solana_with_tokens instead"); - } + }, #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SPLTOKEN { .. } => { return ERR!("SplToken protocol is not supported by lp_coininit - use enable_spl instead"); - } + }, }; let register_params = RegisterCoinParams { @@ -4141,7 +4133,7 @@ pub async fn lp_register_coin( match coins.raw_entry_mut().from_key(&ticker) { RawEntryMut::Occupied(_oe) => { return MmError::err(RegisterCoinError::CoinIsInitializedAlready { coin: ticker.clone() }); - } + }, RawEntryMut::Vacant(ve) => ve.insert(ticker.clone(), MmCoinStruct::new(coin.clone())), }; @@ -4355,7 +4347,7 @@ pub async fn remove_delegation(ctx: MmArc, req: RemoveDelegateRequest) -> Delega return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), }); - } + }, } } @@ -4367,7 +4359,7 @@ pub async fn get_staking_infos(ctx: MmArc, req: GetStakingInfosRequest) -> Staki return MmError::err(StakingInfosError::CoinDoesntSupportStakingInfos { coin: coin.ticker().to_string(), }); - } + }, } } @@ -4380,7 +4372,7 @@ pub async fn add_delegation(ctx: MmArc, req: AddDelegateRequest) -> DelegationRe return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), }); - } + }, }; match req.staking_details { StakingDetails::Qtum(qtum_staking) => coin_concrete.add_delegation(qtum_staking).compat().await, @@ -4444,7 +4436,7 @@ pub async fn my_tx_history(ctx: MmArc, req: Json) -> Result>, S .position(|item| item.internal_id == *id) .ok_or(format!("from_id {:02x} is not found", id))) + 1 - } + }, None => match request.page_number { Some(page_n) => (page_n.get() - 1) * request.limit, None => 0, @@ -4620,7 +4612,7 @@ pub fn update_coins_config(mut config: Json) -> Result { contract_address, } } - } + }, _ => CoinProtocol::UTXO, }; @@ -4669,7 +4661,7 @@ pub fn address_by_coin_conf_and_pubkey_str( CoinProtocol::ERC20 { .. } | CoinProtocol::ETH | CoinProtocol::NFT { .. } => eth::addr_from_pubkey_str(pubkey), CoinProtocol::UTXO | CoinProtocol::QTUM | CoinProtocol::QRC20 { .. } | CoinProtocol::BCH { .. } => { utxo::address_by_conf_and_pubkey_str(coin, conf, pubkey, addr_format) - } + }, CoinProtocol::SLPTOKEN { platform, .. 
} => { let platform_conf = coin_conf(ctx, &platform); if platform_conf.is_null() { @@ -4680,10 +4672,10 @@ pub fn address_by_coin_conf_and_pubkey_str( match platform_protocol { CoinProtocol::BCH { slp_prefix } => { slp_addr_from_pubkey_str(pubkey, &slp_prefix).map_err(|e| ERRL!("{}", e)) - } + }, _ => ERR!("Platform protocol {:?} is not BCH", platform_protocol), } - } + }, CoinProtocol::TENDERMINT(protocol) => tendermint::account_id_from_pubkey_hex(&protocol.account_prefix, pubkey) .map(|id| id.to_string()) .map_err(|e| e.to_string()), @@ -4699,26 +4691,26 @@ pub fn address_by_coin_conf_and_pubkey_str( tendermint::account_id_from_pubkey_hex(&platform.account_prefix, pubkey) .map(|id| id.to_string()) .map_err(|e| e.to_string()) - } + }, _ => ERR!("Platform protocol {:?} is not TENDERMINT", platform_protocol), } - } + }, #[cfg(not(target_arch = "wasm32"))] CoinProtocol::LIGHTNING { .. } => { ERR!("address_by_coin_conf_and_pubkey_str is not implemented for lightning protocol yet!") - } + }, #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA | CoinProtocol::SPLTOKEN { .. } => { ERR!("Solana pubkey is the public address - you do not need to use this rpc call.") - } + }, CoinProtocol::ZHTLC { .. } => ERR!("address_by_coin_conf_and_pubkey_str is not supported for ZHTLC protocol!"), } } #[cfg(target_arch = "wasm32")] fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut> - where - T: MmCoin + ?Sized, +where + T: MmCoin + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); @@ -4750,8 +4742,8 @@ fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut(coin: &T, ctx: &MmArc) -> TxHistoryFut> - where - T: MmCoin + ?Sized, +where + T: MmCoin + ?Sized, { let ticker = coin.ticker().to_owned(); let history_path = coin.tx_history_path(ctx); @@ -4762,7 +4754,7 @@ fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut content, Err(err) if err.kind() == io::ErrorKind::NotFound => { return Ok(Vec::new()); - } + }, Err(err) => { let error = format!( "Error '{}' reading from the history file {}", @@ -4770,7 +4762,7 @@ fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut return Ok(txs), @@ -4792,8 +4784,8 @@ fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut(coin: &T, ctx: &MmArc, mut history: Vec) -> TxHistoryFut<()> - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); @@ -4813,8 +4805,8 @@ fn save_history_to_file_impl(coin: &T, ctx: &MmArc, mut history: Vec(coin: &T, ctx: &MmArc) -> TxHistoryFut - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let migration_path = coin.tx_migration_path(ctx); @@ -4828,7 +4820,7 @@ fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut } else { 0 } - } + }, Err(_) => 0, }; @@ -4840,8 +4832,8 @@ fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut #[cfg(not(target_arch = "wasm32"))] fn update_migration_file_impl(coin: &T, ctx: &MmArc, migration_number: u64) -> TxHistoryFut<()> - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let migration_path = coin.tx_migration_path(ctx); let tmp_file = format!("{}.tmp", migration_path.display()); @@ -4868,8 +4860,8 @@ fn update_migration_file_impl(coin: &T, ctx: &MmArc, migration_number: u64) - #[cfg(not(target_arch = "wasm32"))] fn save_history_to_file_impl(coin: &T, ctx: &MmArc, mut 
history: Vec) -> TxHistoryFut<()> - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let history_path = coin.tx_history_path(ctx); let tmp_file = format!("{}.tmp", history_path.display()); @@ -4913,8 +4905,8 @@ impl TxIdHeight { } pub(crate) fn compare_transactions(a: TxIdHeight, b: TxIdHeight) -> Ordering - where - Id: Ord, +where + Id: Ord, { // the transactions with block_height == 0 are the most recent so we need to separately handle them while sorting if a.block_height == b.block_height { @@ -4954,7 +4946,7 @@ pub async fn get_my_address(ctx: MmArc, req: MyAddressReq) -> MmResult for AccountStorageError { DbTransactionError::ErrorSerializingItem(_) => AccountStorageError::ErrorSerializing(desc), DbTransactionError::ErrorGettingItems(_) | DbTransactionError::ErrorCountingItems(_) => { AccountStorageError::ErrorLoading(desc) - } + }, DbTransactionError::ErrorUploadingItem(_) | DbTransactionError::ErrorDeletingItems(_) => { AccountStorageError::ErrorSaving(desc) - } + }, } } } @@ -166,8 +166,8 @@ impl WasmAccountStorage { /// Loads an account by `AccountId`, applies the given `f` function to it, /// and uploads changes to the storage. async fn update_account(&self, account_id: AccountId, f: F) -> AccountStorageResult<()> - where - F: FnOnce(&mut AccountTable), + where + F: FnOnce(&mut AccountTable), { let locked_db = self.lock_db_mutex().await?; let transaction = locked_db.inner.transaction().await?; @@ -331,7 +331,7 @@ impl AccountStorage for WasmAccountStorage { account.activated_coins.remove(ticker); } }) - .await + .await } } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 32c2382ae3..ac0c4866d5 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -25,10 +25,11 @@ use best_orders::BestOrdersAction; use blake2::digest::{Update, VariableOutput}; use blake2::Blake2bVar; use coins::utxo::{compressed_pub_key_from_priv_raw, ChecksumType, UtxoAddressFormat}; -use coins::{coin_conf, find_pair, lp_coinfind, BalanceTradeFeeUpdatedHandler, CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum, find_unique_account_ids_active}; +use coins::{coin_conf, find_pair, find_unique_account_ids_active, lp_coinfind, BalanceTradeFeeUpdatedHandler, + CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum}; use common::executor::{simple_map::AbortableSimpleMap, AbortSettings, AbortableSystem, AbortedError, SpawnAbortable, SpawnFuture, Timer}; -use common::log::{error, warn, LogOnError, info}; +use common::log::{error, info, warn, LogOnError}; use common::time_cache::TimeCache; use common::{bits256, log, new_uuid, now_ms, now_sec}; use crypto::privkey::SerializableSecp256k1Keypair; @@ -48,8 +49,7 @@ use mm2_number::{BigDecimal, BigRational, MmNumber, MmNumberMultiRepr}; use mm2_rpc::data::legacy::{MatchBy, Mm2RpcResult, OrderConfirmationsSettings, OrderType, RpcOrderbookEntry, SellBuyRequest, SellBuyResponse, TakerAction, TakerRequestForRpc}; use mm2_state_machine::prelude::*; -#[cfg(test)] -use mocktopus::macros::*; +#[cfg(test)] use mocktopus::macros::*; use my_orders_storage::{delete_my_maker_order, delete_my_taker_order, save_maker_order_on_update, save_my_new_maker_order, save_my_new_taker_order, MyActiveOrders, MyOrdersFilteringHistory, MyOrdersHistory, MyOrdersStorage}; @@ -95,24 +95,19 @@ cfg_wasm32! 
{ pub type OrdermatchDbLocked<'a> = DbLocked<'a, OrdermatchDb>; } -#[path = "lp_ordermatch/best_orders.rs"] -mod best_orders; -#[path = "lp_ordermatch/lp_bot.rs"] -mod lp_bot; +#[path = "lp_ordermatch/best_orders.rs"] mod best_orders; +#[path = "lp_ordermatch/lp_bot.rs"] mod lp_bot; pub use lp_bot::{start_simple_market_maker_bot, stop_simple_market_maker_bot, StartSimpleMakerBotRequest, TradingBotEvent}; #[path = "lp_ordermatch/my_orders_storage.rs"] mod my_orders_storage; -#[path = "lp_ordermatch/new_protocol.rs"] -mod new_protocol; +#[path = "lp_ordermatch/new_protocol.rs"] mod new_protocol; #[path = "lp_ordermatch/order_requests_tracker.rs"] mod order_requests_tracker; -#[path = "lp_ordermatch/orderbook_depth.rs"] -mod orderbook_depth; -#[path = "lp_ordermatch/orderbook_rpc.rs"] -mod orderbook_rpc; +#[path = "lp_ordermatch/orderbook_depth.rs"] mod orderbook_depth; +#[path = "lp_ordermatch/orderbook_rpc.rs"] mod orderbook_rpc; #[cfg(all(test, not(target_arch = "wasm32")))] #[path = "ordermatch_tests.rs"] pub mod ordermatch_tests; @@ -158,8 +153,8 @@ pub enum OrderbookP2PHandlerError { P2PRequestError(String), #[display( - fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", - _0 + fmt = "Couldn't find an order {}, ignoring, it will be synced upon pubkey keep alive", + _0 )] OrderNotFound(Uuid), @@ -277,7 +272,7 @@ fn process_trie_delta( )), None => { orderbook.remove_order_trie_update(uuid); - } + }, } } @@ -316,13 +311,13 @@ async fn process_orders_keep_alive( P2PRequest::Ordermatch(req), propagated_from_peer.clone(), ) - .await? - .ok_or_else(|| { - MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( - "No response was received from peer {} for SyncPubkeyOrderbookState request!", - propagated_from_peer - ))) - })?; + .await? 
+ .ok_or_else(|| { + MmError::new(OrderbookP2PHandlerError::P2PRequestError(format!( + "No response was received from peer {} for SyncPubkeyOrderbookState request!", + propagated_from_peer + ))) + })?; let mut orderbook = ordermatch_ctx.orderbook.lock(); for (pair, diff) in response.pair_orders_diff { @@ -398,13 +393,13 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul let response = try_s!(request_any_relay::(ctx.clone(), P2PRequest::Ordermatch(request)).await); let (pubkey_orders, protocol_infos, conf_infos) = match response { Some(( - GetOrderbookRes { - pubkey_orders, - protocol_infos, - conf_infos, - }, - _peer_id, - )) => (pubkey_orders, protocol_infos, conf_infos), + GetOrderbookRes { + pubkey_orders, + protocol_infos, + conf_infos, + }, + _peer_id, + )) => (pubkey_orders, protocol_infos, conf_infos), None => return Ok(()), }; @@ -420,7 +415,7 @@ async fn request_and_fill_orderbook(ctx: &MmArc, base: &str, rel: &str) -> Resul Err(e) => { warn!("Error {} decoding pubkey {}", e, pubkey); continue; - } + }, }; if is_my_order(&pubkey, &my_pubsecp, &orderbook.my_p2p_pubkeys) { @@ -486,9 +481,9 @@ fn delete_my_order(ctx: &MmArc, uuid: Uuid, p2p_privkey: Option(ctx: &MmArc, err_construct: F) -> MmResult, E> - where - E: NotMmError, - F: Fn(String) -> E, +where + E: NotMmError, + F: Fn(String) -> E, { match CryptoCtx::from_ctx(ctx).split_mm() { Ok(crypto_ctx) => Ok(Some(CryptoCtx::mm2_internal_pubkey_hex(crypto_ctx.as_ref()))), @@ -561,39 +556,39 @@ pub async fn process_msg(ctx: MmArc, from_peer: String, msg: &[u8], i_am_relay: let order: OrderbookItem = (created_msg, hex::encode(pubkey.to_bytes().as_slice())).into(); insert_or_update_order(&ctx, order); Ok(()) - } + }, new_protocol::OrdermatchMessage::PubkeyKeepAlive(keep_alive) => { process_orders_keep_alive(ctx, from_peer, pubkey.to_hex(), keep_alive, i_am_relay).await - } + }, new_protocol::OrdermatchMessage::TakerRequest(taker_request) => { let msg = TakerRequest::from_new_proto_and_pubkey(taker_request, pubkey.unprefixed().into()); process_taker_request(ctx, pubkey.unprefixed().into(), msg).await; Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerReserved(maker_reserved) => { let msg = MakerReserved::from_new_proto_and_pubkey(maker_reserved, pubkey.unprefixed().into()); // spawn because process_maker_reserved may take significant time to run let spawner = ctx.spawner(); spawner.spawn(process_maker_reserved(ctx, pubkey.unprefixed().into(), msg)); Ok(()) - } + }, new_protocol::OrdermatchMessage::TakerConnect(taker_connect) => { process_taker_connect(ctx, pubkey, taker_connect.into()).await; Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerConnected(maker_connected) => { process_maker_connected(ctx, pubkey, maker_connected.into()).await; Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerOrderCancelled(cancelled_msg) => { delete_order(&ctx, &pubkey.to_hex(), cancelled_msg.uuid.into()); Ok(()) - } + }, new_protocol::OrdermatchMessage::MakerOrderUpdated(updated_msg) => { process_maker_order_updated(ctx, pubkey.to_hex(), updated_msg) - } + }, } - } + }, Err(e) => MmError::err(OrderbookP2PHandlerError::DecodeError(e.to_string())), } } @@ -635,8 +630,8 @@ impl From for TryFromBytesError { trait TryFromBytes { fn try_from_bytes(bytes: Vec) -> Result - where - Self: Sized; + where + Self: Sized; } impl TryFromBytes for String { @@ -670,13 +665,13 @@ pub fn process_peer_request(ctx: MmArc, request: OrdermatchRequest) -> Result { let response = process_sync_pubkey_orderbook_state(ctx, pubkey, 
trie_roots); response.map(|res| res.map(|r| encode_message(&r).expect("Serialization failed"))) - } + }, OrdermatchRequest::BestOrders { coin, action, volume } => { best_orders::process_best_orders_p2p_request(ctx, coin, action, volume) - } + }, OrdermatchRequest::BestOrdersByNumber { coin, action, number } => { best_orders::process_best_orders_p2p_request_by_number(ctx, coin, action, number) - } + }, OrdermatchRequest::OrderbookDepth { pairs } => orderbook_depth::process_orderbook_depth_p2p_request(ctx, pairs), } } @@ -742,7 +737,7 @@ fn get_pubkeys_orders(orderbook: &Orderbook, base: String, rel: String) -> GetPu uuid ); continue; - } + }, }; let uuids = uuids_by_pubkey.entry(order.pubkey.clone()).or_insert_with(Vec::new); protocol_infos.insert(order.uuid, order.base_rel_proto_info()); @@ -814,12 +809,12 @@ impl DeltaOrFullTrie { .map(|(key, value)| (key, value.map(From::from))) .collect(); DeltaOrFullTrie::Delta(new_map) - } + }, DeltaOrFullTrie::FullTrie(trie) => { trie.iter().for_each(|(key, val)| on_each(key, Some(val))); let new_trie = trie.into_iter().map(|(key, value)| (key, value.into())).collect(); DeltaOrFullTrie::FullTrie(new_trie) - } + }, } } } @@ -850,8 +845,8 @@ fn get_full_trie( db: &MemoryDB, getter: impl Fn(&Key) -> Option, ) -> Result, TrieDiffHistoryError> - where - Key: Clone + Eq + std::hash::Hash + TryFromBytes, +where + Key: Clone + Eq + std::hash::Hash + TryFromBytes, { let trie = TrieDB::::new(db, trie_root)?; let trie: Result, TrieDiffHistoryError> = trie @@ -930,10 +925,10 @@ fn process_sync_pubkey_orderbook_state( let delta_result = match pubkey_state.order_pairs_trie_state_history.get(&pair) { Some(history) => { DeltaOrFullTrie::from_history(history, root, *actual_pair_root, &orderbook.memory_db, order_getter) - } + }, None => { get_full_trie(actual_pair_root, &orderbook.memory_db, order_getter).map(DeltaOrFullTrie::FullTrie) - } + }, }; let delta = try_s!(delta_result); @@ -956,11 +951,11 @@ fn process_sync_pubkey_orderbook_state( if let Some(ref info) = o.conf_settings { conf_infos.insert(o.uuid, info.clone()); } - } + }, None => { protocol_infos.remove(uuid); conf_infos.remove(uuid); - } + }, }); (pair, new_trie) }) @@ -1010,10 +1005,10 @@ pub fn parse_orderbook_pair_from_topic(topic: &str) -> Option<(&str, &str)> { } else { None } - } + }, None => None, } - } + }, None => None, }, _ => None, @@ -1056,7 +1051,7 @@ fn maker_order_created_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_created' message: {}", e); return; - } + }, }; let item: OrderbookItem = (message, hex::encode(key_pair.public_slice())).into(); insert_or_update_my_order(&ctx, item, order); @@ -1087,7 +1082,7 @@ fn maker_order_updated_p2p_notify( Err(e) => { error!("Couldn't encode and sign the 'maker_order_updated' message: {}", e); return; - } + }, }; process_my_maker_order_updated(&ctx, &message); broadcast_p2p_msg(&ctx, topic, encoded_msg, peer_id); @@ -1134,7 +1129,7 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { Err(e) => { log::warn!("Couldn't handle the 'balance_updated' event: {}", e); return; - } + }, }; let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); @@ -1157,9 +1152,9 @@ impl BalanceTradeFeeUpdatedHandler for BalanceUpdateOrdermatchHandler { order.clone(), MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); continue; } } @@ -1604,12 +1599,12 @@ impl TakerOrder { if !uuids.contains(&reserved.maker_order_uuid) { return MatchReservedResult::NotMatched; 
} - } + }, MatchBy::Pubkeys(pubkeys) => { if !pubkeys.contains(&reserved.sender_pubkey) { return MatchReservedResult::NotMatched; } - } + }, } let my_base_amount = self.request.get_base_amount(); @@ -1627,18 +1622,18 @@ impl TakerOrder { } else { MatchReservedResult::NotMatched } - } + }, TakerAction::Sell => { let match_ticker = (self.request.base == reserved.rel || self.base_orderbook_ticker.as_ref() == Some(&reserved.rel)) && (self.request.rel == reserved.base - || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); + || self.rel_orderbook_ticker.as_ref() == Some(&reserved.base)); if match_ticker && my_base_amount == other_rel_amount && my_rel_amount <= other_base_amount { MatchReservedResult::Matched } else { MatchReservedResult::NotMatched } - } + }, } } @@ -2046,7 +2041,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - } + }, TakerAction::Sell => { let ticker_match = (self.base == taker.rel || self.base_orderbook_ticker.as_ref() == Some(&taker.rel)) && (self.rel == taker.base || self.rel_orderbook_ticker.as_ref() == Some(&taker.base)); @@ -2065,7 +2060,7 @@ impl MakerOrder { } else { OrderMatchResult::NotMatched } - } + }, } } @@ -2146,7 +2141,7 @@ impl From for MakerOrder { rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, } - } + }, } } } @@ -2344,7 +2339,7 @@ fn broadcast_ordermatch_message( Err(e) => { error!("Failed to encode and sign ordermatch message: {}", e); return; - } + }, }; broadcast_p2p_msg(ctx, topic, encoded_msg, peer_id); } @@ -2392,10 +2387,10 @@ impl TrieDiffHistory { while let Some(next_diff) = self.inner.remove(diff.next_root) { diff = next_diff; } - } + }, None => { self.inner.insert(insert_at, diff); - } + }, }; } @@ -2456,7 +2451,7 @@ fn pubkey_state_mut<'a>( RawEntryMut::Vacant(e) => { let state = OrderbookPubkeyState::with_history_timeout(Duration::new(TRIE_STATE_HISTORY_TIMEOUT, 0)); e.insert(from_pubkey.to_string(), state).1 - } + }, } } @@ -2548,7 +2543,7 @@ impl Orderbook { Err(e) => { error!("Error getting {} trie with root {:?}", e, prev_root); return; - } + }, }; let order_bytes = order.trie_state_bytes(); if let Err(e) = pair_trie.insert(order.uuid.as_bytes(), &order_bytes) { @@ -2657,7 +2652,7 @@ impl Orderbook { Err(_) => { error!("Failed to get existing trie with root {:?}", pair_state); return Some(order); - } + }, }; if pubkey_state.order_pairs_trie_state_history.get(&alb_ordered).is_some() { @@ -2908,11 +2903,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.rel); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", maker_order.rel, e); return; - } + }, }; let maker_coin = match lp_coinfind(&ctx, &maker_order.base).await { @@ -2920,11 +2915,11 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Ok(None) => { error!("Coin {} is not found/enabled", maker_order.base); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", maker_order.base, e); return; - } + }, }; let alice = bits256::from(maker_match.request.sender_pubkey.0); let maker_amount = maker_match.reserved.get_base_amount().clone(); @@ -2954,7 +2949,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO my_conf_settings, other_conf_settings, } - } + }, None => AtomicLocktimeVersion::V1, }; let lock_time = lp_atomic_locktime( @@ -2978,7 +2973,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO Err(e) => { 
error!("Error {} on secret generation", e); return; - } + }, }; let account_db_id = maker_coin.account_db_id(); @@ -3016,7 +3011,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO .run(Box::new(maker_swap_v2::Initialize::default())) .await .error_log(); - } + }, _ => todo!("implement fallback to the old protocol here"), } } else { @@ -3029,7 +3024,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3069,11 +3064,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", taker_coin_ticker); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", taker_coin_ticker, e); return; - } + }, }; let maker_coin_ticker = taker_order.maker_coin_ticker(); @@ -3082,11 +3077,11 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Ok(None) => { error!("Coin {} is not found/enabled", maker_coin_ticker); return; - } + }, Err(e) => { error!("!lp_coinfind({}): {}", maker_coin_ticker, e); return; - } + }, }; // lp_connected_alice is called only from process_maker_connected, which returns if CryptoCtx is not initialized @@ -3116,7 +3111,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat my_conf_settings, other_conf_settings, } - } + }, None => AtomicLocktimeVersion::V1, }; let locktime = lp_atomic_locktime( @@ -3141,7 +3136,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat Err(e) => { error!("Error {} on secret generation", e); return; - } + }, }; let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); match (maker_coin, taker_coin) { @@ -3177,12 +3172,12 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat .run(Box::new(taker_swap_v2::Initialize::default())) .await .error_log(); - } + }, _ => todo!("implement fallback to the old protocol here"), } } else { #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); if let Err(e) = insert_new_swap_to_db( ctx.clone(), @@ -3193,7 +3188,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat LEGACY_SWAP_TYPE, account_db_id.as_deref(), ) - .await + .await { error!("Error {} on new swap insertion", e); } @@ -3212,7 +3207,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat locktime, taker_order.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); run_taker_swap(RunTakerSwapInput::StartNew(taker_swap), ctx).await } @@ -3293,7 +3288,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - } + }, }; let max_vol = match calc_max_maker_vol(&ctx, &base, ¤t_balance, FeeApproxStage::OrderIssue).await { @@ -3302,7 +3297,7 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { log::info!("Error {} on balance check to kickstart order {}, cancelling", e, uuid); to_cancel.push(uuid); continue; - } + }, }; if max_vol < order.available_amount() { order.max_base_vol = order.reserved_amount() + max_vol; @@ -3338,9 +3333,9 @@ pub async fn lp_ordermatch_loop(ctx: MmArc) { order.clone(), 
MakerOrderCancellationReason::InsufficientBalance, ) - .compat() - .await - .ok(); + .compat() + .await + .ok(); } } } @@ -3558,11 +3553,11 @@ async fn process_maker_reserved(ctx: MmArc, from_pubkey: H256Json, reserved_msg: if (my_order.match_reserved(&reserved_msg) == MatchReservedResult::Matched && my_order.matches.is_empty()) && base_coin.is_coin_protocol_supported(&reserved_msg.base_protocol_info, None, lock_time, false) && rel_coin.is_coin_protocol_supported( - &reserved_msg.rel_protocol_info, - Some(reserved_msg.rel_amount.clone()), - lock_time, - false, - ) + &reserved_msg.rel_protocol_info, + Some(reserved_msg.rel_amount.clone()), + lock_time, + false, + ) { let connect = TakerConnect { sender_pubkey: H256Json::from(our_public_id.bytes), @@ -3620,7 +3615,7 @@ async fn process_maker_connected(ctx: MmArc, from_pubkey: PublicKey, connected: connected.maker_order_uuid ); return; - } + }, }; if order_match.reserved.sender_pubkey != unprefixed_from.into() { @@ -3688,21 +3683,21 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: atomic_locktime_v, ) as f64 * rel_coin.maker_locktime_multiplier()) - .ceil() as u64; + .ceil() as u64; if !order.matches.contains_key(&taker_request.uuid) && base_coin.is_coin_protocol_supported( - taker_request.base_protocol_info_for_maker(), - Some(base_amount.clone()), - maker_lock_duration, - true, - ) + taker_request.base_protocol_info_for_maker(), + Some(base_amount.clone()), + maker_lock_duration, + true, + ) && rel_coin.is_coin_protocol_supported( - taker_request.rel_protocol_info_for_maker(), - None, - maker_lock_duration, - true, - ) + taker_request.rel_protocol_info_for_maker(), + None, + maker_lock_duration, + true, + ) { let reserved = MakerReserved { dest_pub_key: taker_request.sender_pubkey, @@ -3780,7 +3775,7 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg connect_msg.taker_order_uuid ); return; - } + }, }; if order_match.request.sender_pubkey != sender_unprefixed.into() { log::warn!("Connect message sender pubkey != request message sender pubkey"); @@ -4815,7 +4810,7 @@ pub async fn update_maker_order(ctx: &MmArc, req: MakerOrderUpdateReq) -> Result try_s!(validate_price(new_price.clone())); update_msg.with_new_price(new_price.clone().into()); new_price - } + }, None => order_before_update.price.clone(), }; @@ -4963,7 +4958,10 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St for db_id in db_ids { let storage = MyOrdersStorage::new(ctx.clone(), Some(&db_id)); - if let (Ok(order), Ok(cancellation_reason)) = (storage.load_order_from_history(req.uuid).await, &storage.select_order_status(req.uuid).await) { + if let (Ok(order), Ok(cancellation_reason)) = ( + storage.load_order_from_history(req.uuid).await, + &storage.select_order_status(req.uuid).await, + ) { info!("Order with UUID=({}) found for db_id=({db_id})", req.uuid); let res = json!(OrderForRpcWithCancellationReason { order: OrderForRpc::from(&order), @@ -4974,7 +4972,7 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St .body(json::to_vec(&res).expect("Serialization failed")) .map_err(|e| ERRL!("{}", e)); }; - }; + } Err("No orders found across databases".to_string()) } @@ -5093,7 +5091,7 @@ pub async fn orders_history_by_filter(ctx: MmArc, req: Json) -> Result Result (), } @@ -5247,7 +5245,7 @@ pub async fn cancel_order_rpc(ctx: MmArc, req: Json) -> Result> return Response::builder() .body(json::to_vec(&res).expect("Serialization failed")) .map_err(|e| ERRL!("{}", e)); - } + }, // error is 
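`order_status` above now probes one `MyOrdersStorage` per known account database id and answers from the first database that knows the UUID, only returning the "No orders found across databases" error after every id has been tried. A reduced, synchronous sketch of that lookup loop (the storage type and its contents are placeholders; the per-db_id iteration and the error string follow the hunk):

use std::collections::HashMap;

// Placeholder for MyOrdersStorage: one order store per account db id.
struct OrderStore {
    orders_by_uuid: HashMap<String, String>,
}

impl OrderStore {
    fn load_order_from_history(&self, uuid: &str) -> Option<&String> {
        self.orders_by_uuid.get(uuid)
    }
}

fn order_status(stores_by_db_id: &HashMap<String, OrderStore>, uuid: &str) -> Result<String, String> {
    for (db_id, store) in stores_by_db_id {
        if let Some(order) = store.load_order_from_history(uuid) {
            // Mirrors the patch's log line: report which account db the order came from.
            return Ok(format!("Order with UUID=({uuid}) found for db_id=({db_id}): {order}"));
        }
    }
    Err("No orders found across databases".to_string())
}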
returned Entry::Vacant(_) => (), } @@ -5429,8 +5427,7 @@ pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { coins.insert(order.request.rel.clone()); taker_orders.insert(order.request.uuid, order); } - }; - + } Ok(coins) } @@ -5531,7 +5528,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5551,7 +5548,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec { let mut to_remove = Vec::new(); for (uuid, order) in maker_orders.iter() { @@ -5571,7 +5568,7 @@ pub async fn cancel_orders_by(ctx: &MmArc, cancel_by: CancelBy) -> Result<(Vec match e.get() { OrderbookRequestingState::Requested => { // We are subscribed to the topic and the orderbook was requested already true - } + }, OrderbookRequestingState::NotRequested { subscribed_at } => { // We are subscribed to the topic. Also we didn't request the orderbook, // True if enough time has passed for the orderbook to fill by OrdermatchRequest::SyncPubkeyOrderbookState. *subscribed_at + ORDERBOOK_REQUESTING_TIMEOUT < current_timestamp - } + }, }, } }; @@ -5715,7 +5712,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - } + }, TakerAction::Buy => { let maker_coin_confs = if taker_settings.base_confs < maker_settings.base_confs { taker_settings.base_confs @@ -5733,7 +5730,7 @@ fn choose_maker_confs_and_notas( maker_settings.rel_confs, maker_settings.rel_nota, ) - } + }, }, None => ( maker_settings.base_confs, @@ -5833,12 +5830,12 @@ fn orderbook_address( coins::eth::addr_from_pubkey_str(pubkey) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - } + }, CoinProtocol::UTXO | CoinProtocol::QTUM | CoinProtocol::QRC20 { .. } | CoinProtocol::BCH { .. } => { coins::utxo::address_by_conf_and_pubkey_str(coin, conf, pubkey, addr_format) .map(OrderbookAddress::Transparent) .map_to_mm(OrderbookAddrErr::AddrFromPubkeyError) - } + }, CoinProtocol::SLPTOKEN { platform, .. } => { let platform_conf = coin_conf(ctx, &platform); if platform_conf.is_null() { @@ -5852,12 +5849,12 @@ fn orderbook_address( .mm_err(|e| OrderbookAddrErr::AddrFromPubkeyError(e.to_string())), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(platform)), } - } + }, CoinProtocol::TENDERMINT(protocol) => Ok(coins::tendermint::account_id_from_pubkey_hex( &protocol.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), CoinProtocol::TENDERMINTTOKEN(proto) => { let platform_conf = coin_conf(ctx, &proto.platform); if platform_conf.is_null() { @@ -5870,17 +5867,17 @@ fn orderbook_address( &platform.account_prefix, pubkey, ) - .map(|id| OrderbookAddress::Transparent(id.to_string()))?), + .map(|id| OrderbookAddress::Transparent(id.to_string()))?), _ => MmError::err(OrderbookAddrErr::InvalidPlatformCoinProtocol(format!( "Platform protocol {:?} is not TENDERMINT", platform_protocol ))), } - } + }, #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA | CoinProtocol::SPLTOKEN { .. } => { MmError::err(OrderbookAddrErr::CoinIsNotSupported(coin.to_owned())) - } + }, CoinProtocol::ZHTLC { .. 
} => Ok(OrderbookAddress::Shielded), #[cfg(not(target_arch = "wasm32"))] // Todo: Shielded address is used for lightning for now, the lightning node public key can be used for the orderbook entry pubkey From 69d9886a90e9f9c2084cb01b3c16d0b142343460 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 25 Apr 2024 07:48:00 +0100 Subject: [PATCH 071/186] wip lp_ordermatch --- mm2src/mm2_main/src/lp_ordermatch.rs | 28 ++++++++++-- .../src/lp_ordermatch/my_orders_storage.rs | 45 +++++++++++-------- 2 files changed, 50 insertions(+), 23 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index ac0c4866d5..1e5ca949cf 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -25,8 +25,8 @@ use best_orders::BestOrdersAction; use blake2::digest::{Update, VariableOutput}; use blake2::Blake2bVar; use coins::utxo::{compressed_pub_key_from_priv_raw, ChecksumType, UtxoAddressFormat}; -use coins::{coin_conf, find_pair, find_unique_account_ids_active, lp_coinfind, BalanceTradeFeeUpdatedHandler, - CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum}; +use coins::{coin_conf, find_pair, find_unique_account_ids_active, lp_coinfind, lp_coinfind_or_err, + BalanceTradeFeeUpdatedHandler, CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum}; use common::executor::{simple_map::AbortableSimpleMap, AbortSettings, AbortableSystem, AbortedError, SpawnAbortable, SpawnFuture, Timer}; use common::log::{error, info, warn, LogOnError}; @@ -101,6 +101,8 @@ cfg_wasm32! { pub use lp_bot::{start_simple_market_maker_bot, stop_simple_market_maker_bot, StartSimpleMakerBotRequest, TradingBotEvent}; +use self::my_orders_storage::{MyOrdersError, MyOrdersResult}; + #[path = "lp_ordermatch/my_orders_storage.rs"] mod my_orders_storage; #[path = "lp_ordermatch/new_protocol.rs"] mod new_protocol; @@ -1678,6 +1680,15 @@ impl TakerOrder { } fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } + + async fn db_id(&self, ctx: &MmArc) -> MyOrdersResult> { + lp_coinfind_or_err(ctx, &self.request.base) + .await + .mm_err(|err| { + MyOrdersError::ErrorSaving(format!("Error finding/deriving wallet pubkey for db_id: {err:?}")) + }) + .map(|coin| coin.account_db_id()) + } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -2095,6 +2106,15 @@ impl MakerOrder { fn was_updated(&self) -> bool { self.updated_at != Some(self.created_at) } fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } + + async fn db_id(&self, ctx: &MmArc) -> MyOrdersResult> { + lp_coinfind_or_err(ctx, &self.base) + .await + .mm_err(|err| { + MyOrdersError::ErrorSaving(format!("Error finding/deriving wallet pubkey for db_id: {err:?}")) + }) + .map(|coin| coin.account_db_id()) + } } impl From for MakerOrder { @@ -4957,7 +4977,7 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St } for db_id in db_ids { - let storage = MyOrdersStorage::new(ctx.clone(), Some(&db_id)); + let storage = MyOrdersStorage::new(ctx.clone(), Some(db_id.clone())); if let (Ok(order), Ok(cancellation_reason)) = ( storage.load_order_from_history(req.uuid).await, &storage.select_order_status(req.uuid).await, @@ -5408,7 +5428,7 @@ pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { let db_ids = find_unique_account_ids_active(ctx).await?; for db_id in db_ids { - let storage = MyOrdersStorage::new(ctx.clone(), Some(&db_id)); + let storage = 
MyOrdersStorage::new(ctx.clone(), Some(db_id)); let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 2501768722..96479f4e0c 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -1,7 +1,7 @@ use super::{MakerOrder, MakerOrderCancellationReason, MyOrdersFilter, Order, RecentOrdersSelectResult, TakerOrder, TakerOrderCancellationReason}; use async_trait::async_trait; -use common::log::LogOnError; +use common::log::{error, warn, LogOnError}; use common::{BoxFut, PagingOptions}; use derive_more::Display; use futures::{FutureExt, TryFutureExt}; @@ -36,8 +36,8 @@ pub enum MyOrdersError { } pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - // TODO db_id shouldn't be None - let storage = MyOrdersStorage::new(ctx, None); + let db_id = order.db_id(&ctx).await?; + let storage = MyOrdersStorage::new(ctx, db_id); storage .save_new_active_maker_order(order) .await @@ -50,8 +50,8 @@ pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrders } pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrdersResult<()> { - // TODO db_id - let storage = MyOrdersStorage::new(ctx, None); + let db_id = order.db_id(&ctx).await?; + let storage = MyOrdersStorage::new(ctx, db_id); storage .save_new_active_taker_order(order) .await @@ -64,8 +64,8 @@ pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrders } pub async fn save_maker_order_on_update(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - // TODO db_id - let storage = MyOrdersStorage::new(ctx, None); + let db_id = order.db_id(&ctx).await?; + let storage = MyOrdersStorage::new(ctx, db_id); storage.update_active_maker_order(order).await?; if order.save_in_history { @@ -80,8 +80,14 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa let uuid = order.request.uuid; let save_in_history = order.save_in_history; - // TODO db_id - let storage = MyOrdersStorage::new(ctx, None); + let db_id = match order.db_id(&ctx).await { + Ok(val) => val, + Err(err) => { + error!("{err}"); + None + }, + }; + let storage = MyOrdersStorage::new(ctx, db_id); storage .delete_active_taker_order(uuid) .await @@ -117,8 +123,14 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa let uuid = order_to_save.uuid; let save_in_history = order_to_save.save_in_history; - // TODO db_id - let storage = MyOrdersStorage::new(ctx, None); + let db_id = match order_to_save.db_id(&ctx).await { + Ok(val) => val, + Err(err) => { + warn!("{err}"); + None + }, + }; + let storage = MyOrdersStorage::new(ctx, db_id); if order_to_save.was_updated() { if let Ok(order_from_file) = storage.load_active_maker_order(order_to_save.uuid).await { order_to_save = order_from_file; @@ -239,12 +251,7 @@ mod native_impl { } impl MyOrdersStorage { - pub fn new(ctx: MmArc, db_id: Option<&str>) -> MyOrdersStorage { - MyOrdersStorage { - ctx, - db_id: db_id.map(|e| e.to_string()), - } - } + pub fn new(ctx: MmArc, db_id: Option) -> MyOrdersStorage { MyOrdersStorage { ctx, db_id } } } #[async_trait] @@ -412,10 +419,10 @@ mod wasm_impl { } impl MyOrdersStorage { - pub fn new(ctx: MmArc, db_id: Option<&str>) -> MyOrdersStorage { + pub fn new(ctx: MmArc, db_id: 
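Together with the `db_id()` helpers introduced for `TakerOrder` and `MakerOrder`, the storage functions above settle into one pattern: derive the db id from the order's base coin, build a `MyOrdersStorage` for it (the constructor now takes an owned `Option<String>`), propagate the error on save, but only log it and fall back to the default database on delete. A condensed sketch of that pattern; every type here is a placeholder and only the resolve/propagate/fall-back sequence comes from the hunks:

use std::collections::HashMap;

struct Coin { account_pubkey_hash: Option<String> }
struct Order { base: String }

// Placeholder storage: the real MyOrdersStorage also carries the MmArc context.
struct MyOrdersStorage { db_id: Option<String> }

impl MyOrdersStorage {
    fn new(db_id: Option<String>) -> Self { Self { db_id } }
}

// Resolve the per-order db id from the order's base coin, as `db_id()` does in the patch.
fn order_db_id(order: &Order, coins: &HashMap<String, Coin>) -> Result<Option<String>, String> {
    coins
        .get(&order.base)
        .ok_or_else(|| format!("Error finding/deriving wallet pubkey for db_id: {} is not enabled", order.base))
        .map(|coin| coin.account_pubkey_hash.clone())
}

fn storage_for_save(order: &Order, coins: &HashMap<String, Coin>) -> Result<MyOrdersStorage, String> {
    // On save, a failed lookup is returned to the caller.
    Ok(MyOrdersStorage::new(order_db_id(order, coins)?))
}

fn storage_for_delete(order: &Order, coins: &HashMap<String, Coin>) -> MyOrdersStorage {
    // On delete, the failure is only logged and the default database (None) is used,
    // so a stale order can still be cleaned up.
    let db_id = order_db_id(order, coins).unwrap_or_else(|err| {
        eprintln!("{err}");
        None
    });
    MyOrdersStorage::new(db_id)
}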
Option) -> MyOrdersStorage { MyOrdersStorage { ctx: OrdermatchContext::from_ctx(&ctx).expect("!OrdermatchContext::from_ctx"), - db_id: db_id.map(|e| e.to_string()), + db_id, } } } From 875c9df08a1227be3c4fef78ecba689c978f86a3 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 25 Apr 2024 18:34:02 +0100 Subject: [PATCH 072/186] cleanup lp_ordermatch.rs --- mm2src/mm2_main/src/lp_ordermatch.rs | 128 +++++++++++++-------------- 1 file changed, 62 insertions(+), 66 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 1e5ca949cf..6dbbc9b7e1 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2753,6 +2753,7 @@ struct OrdermatchContext { pending_maker_reserved: AsyncMutex>>, #[cfg(target_arch = "wasm32")] ordermatch_db: ConstructibleDb, + db_id: Option, } #[allow(unused)] @@ -2790,6 +2791,7 @@ pub fn init_ordermatch_context(ctx: &MmArc, db_id: Option<&str>) -> OrdermatchIn original_tickers, #[cfg(target_arch = "wasm32")] ordermatch_db: ConstructibleDb::new(ctx, db_id), + db_id: db_id.map(|d| d.to_string()), }; from_ctx(&ctx.ordermatch_ctx, move || Ok(ordermatch_context)) @@ -2819,7 +2821,8 @@ impl OrdermatchContext { orderbook_tickers: Default::default(), original_tickers: Default::default(), #[cfg(target_arch = "wasm32")] - ordermatch_db: ConstructibleDb::new(ctx, None), + ordermatch_db: ConstructibleDb::new(ctx, db_id), + db_id: None, }) }))) } @@ -3390,8 +3393,7 @@ pub async fn clean_memory_loop(ctx_weak: MmWeak) { /// The function locks the [`OrdermatchContext::my_maker_orders`] and [`OrdermatchContext::my_taker_orders`] mutexes. async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let mut my_taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - // TODO db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone(), ordermatch_ctx.db_id.clone()); let mut my_actual_taker_orders = HashMap::with_capacity(my_taker_orders.len()); for (uuid, order) in my_taker_orders.drain() { @@ -3481,8 +3483,7 @@ async fn check_balance_for_maker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchC /// The function locks the [`OrdermatchContext::my_maker_orders`] mutex. 
async fn handle_timed_out_maker_matches(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let now = now_ms(); - // TODO db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone(), ordermatch_ctx.db_id.clone()); let my_maker_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); for (_, order) in my_maker_orders.iter() { @@ -3596,8 +3597,7 @@ async fn process_maker_reserved(ctx: MmArc, from_pubkey: H256Json, reserved_msg: my_order .matches .insert(taker_match.reserved.maker_order_uuid, taker_match); - // TODO db_id - MyOrdersStorage::new(ctx, None) + MyOrdersStorage::new(ctx, base_coin.account_db_id()) .update_active_taker_order(my_order) .await .error_log_with_msg("!update_active_taker_order"); @@ -3674,8 +3674,7 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: } let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); - // TODO db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone(), ordermatch_ctx.db_id.clone()); let mut my_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); let filtered = my_orders .iter_mut() @@ -3824,8 +3823,7 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg updated_msg.with_new_max_volume(my_order.available_amount().into()); maker_order_updated_p2p_notify(ctx.clone(), topic, updated_msg, my_order.p2p_keypair()); } - // TODO db_id - MyOrdersStorage::new(ctx, None) + MyOrdersStorage::new(ctx, ordermatch_ctx.db_id.clone()) .update_active_maker_order(&my_order) .await .error_log_with_msg("!update_active_maker_order"); @@ -5088,70 +5086,68 @@ pub struct FilteringOrder { /// Returns *all* uuids of swaps, which match the selected filter. 
pub async fn orders_history_by_filter(ctx: MmArc, req: Json) -> Result>, String> { - // TODO db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); - + let mut results = vec![]; + let db_ids = try_s!(find_unique_account_ids_active(&ctx).await); let filter: MyOrdersFilter = try_s!(json::from_value(req)); - let db_result = try_s!(storage.select_orders_by_filter(&filter, None).await); - - let mut warnings = vec![]; - let rpc_orders = if filter.include_details { - let mut vec = Vec::with_capacity(db_result.orders.len()); - for order in db_result.orders.iter() { - let uuid = match Uuid::parse_str(order.uuid.as_str()) { - Ok(uuid) => uuid, - Err(e) => { - let warning = format!( - "Order details for Uuid {} were skipped because uuid could not be parsed", - order.uuid - ); - warn!("{}, error {}", warning, e); - warnings.push(UuidParseError { - uuid: order.uuid.clone(), - warning, - }); + for db_id in db_ids { + let storage = MyOrdersStorage::new(ctx.clone(), Some(db_id.clone())); + let db_result = try_s!(storage.select_orders_by_filter(&filter, None).await); + let mut warnings = vec![]; + + if filter.include_details { + let mut vec = Vec::with_capacity(db_result.orders.len()); + for order in db_result.orders.iter() { + let uuid = match Uuid::parse_str(order.uuid.as_str()) { + Ok(uuid) => uuid, + Err(e) => { + let warning = format!( + "Order details for Uuid {} were skipped because uuid could not be parsed", + order.uuid + ); + warn!("{}, error {}", warning, e); + warnings.push(UuidParseError { + uuid: order.uuid.clone(), + warning, + }); + continue; + }, + }; + + if let Ok(order) = storage.load_order_from_history(uuid).await { + vec.push(order); continue; - }, - }; + } - if let Ok(order) = storage.load_order_from_history(uuid).await { - vec.push(order); - continue; - } + let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(&ctx)); + if order.order_type == "Maker" { + let maybe_order_mutex = ordermatch_ctx.maker_orders_ctx.lock().get_order(&uuid).cloned(); + if let Some(maker_order_mutex) = maybe_order_mutex { + let maker_order = maker_order_mutex.lock().await.clone(); + vec.push(Order::Maker(maker_order)); + } + continue; + } - let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(&ctx)); - if order.order_type == "Maker" { - let maybe_order_mutex = ordermatch_ctx.maker_orders_ctx.lock().get_order(&uuid).cloned(); - if let Some(maker_order_mutex) = maybe_order_mutex { - let maker_order = maker_order_mutex.lock().await.clone(); - vec.push(Order::Maker(maker_order)); + let taker_orders = ordermatch_ctx.my_taker_orders.lock().await; + if let Some(taker_order) = taker_orders.get(&uuid) { + vec.push(Order::Taker(taker_order.to_owned())); } - continue; } - let taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - if let Some(taker_order) = taker_orders.get(&uuid) { - vec.push(Order::Taker(taker_order.to_owned())); - } + let details: Vec<_> = vec.iter().map(OrderForRpc::from).collect(); + results.push(json!({ + "result": { + "orders": db_result.orders, + "details": details, + "found_records": db_result.total_count, + "warnings": warnings, + "db_id": db_id + } + })); } - vec - } else { - vec![] - }; - - let details: Vec<_> = rpc_orders.iter().map(OrderForRpc::from).collect(); - - let json = json!({ - "result": { - "orders": db_result.orders, - "details": details, - "found_records": db_result.total_count, - "warnings": warnings, - }}); - - let res = try_s!(json::to_vec(&json)); + } - Ok(try_s!(Response::builder().body(res))) + 
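After the rewrite above, `orders_history_by_filter` applies the same filter to the storage of every active account id and pushes one result object per `db_id`, so the RPC body is serialized from a `Vec` of per-database results instead of a single object. A schematic of that aggregation using `serde_json` only; the closure input is a placeholder and only a subset of the fields built in the hunk (`orders`, `found_records`, `db_id`) is shown:

use serde_json::{json, Value as Json};

// One (db_id, matched orders, total count) tuple per account database; the real code
// also carries order details and UUID-parse warnings.
fn history_response(per_db: Vec<(String, Vec<Json>, usize)>) -> Json {
    let results: Vec<Json> = per_db
        .into_iter()
        .map(|(db_id, orders, found_records)| {
            json!({
                "result": {
                    "orders": orders,
                    "found_records": found_records,
                    "db_id": db_id,
                }
            })
        })
        .collect();
    // The whole response body is now a JSON array with one entry per database.
    json!(results)
}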
Ok(try_s!(Response::builder().body(try_s!(json::to_vec(&results))))) } #[derive(Deserialize)] From 4bba3fb0a16711f7f4168c4f174fb5febe8e4484 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 26 Apr 2024 09:37:17 +0100 Subject: [PATCH 073/186] new sql initialization TODOs --- mm2src/mm2_core/src/mm_ctx.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index d6298ef0ba..b6ea8fb48f 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -411,6 +411,8 @@ impl MmCtx { let mut store = HashMap::new(); store.insert(db_id, connection.clone()); drop(connections); + + // TODO: run migration and fix directions Some(connection) }; }; @@ -437,7 +439,8 @@ impl MmCtx { )); let mut store = HashMap::new(); store.insert(db_id, connection.clone()); - + drop(connections); + // TODO: run migration and fix directions connection }; } @@ -459,8 +462,8 @@ impl MmCtx { let connection = Arc::new(Mutex::new(try_s!(Connection::open(sqlite_file_path)))); let mut store = HashMap::new(); store.insert(db_id, connection.clone()); - drop(connections); + // TODO: run migration and fix directions Ok(connection) } } From 637286d879be80cbb420fd1f63d980fa4cad18eb Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 26 Apr 2024 17:27:04 +0100 Subject: [PATCH 074/186] todo for db_id in orders_kick_start --- mm2src/mm2_main/src/lp_ordermatch.rs | 35 +++++++++++++--------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 2be114bc12..c3abdec0cb 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -5421,28 +5421,25 @@ pub struct HistoricalOrder { pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { let mut coins = HashSet::new(); let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(ctx)); + let db_id: Option<&str> = None; // TODO + let storage = MyOrdersStorage::new(ctx.clone(), db_id); + let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); + let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); - let db_ids = find_unique_account_ids_active(ctx).await?; - for db_id in db_ids { - let storage = MyOrdersStorage::new(ctx.clone(), Some(db_id)); - let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); - let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); - - { - let mut maker_orders_ctx = ordermatch_ctx.maker_orders_ctx.lock(); - for order in saved_maker_orders { - coins.insert(order.base.clone()); - coins.insert(order.rel.clone()); - maker_orders_ctx.add_order(ctx.weak(), order.clone(), None); - } + { + let mut maker_orders_ctx = ordermatch_ctx.maker_orders_ctx.lock(); + for order in saved_maker_orders { + coins.insert(order.base.clone()); + coins.insert(order.rel.clone()); + maker_orders_ctx.add_order(ctx.weak(), order.clone(), None); } + } - let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - for order in saved_taker_orders { - coins.insert(order.request.base.clone()); - coins.insert(order.request.rel.clone()); - taker_orders.insert(order.request.uuid, order); - } + let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; + for order in saved_taker_orders { + coins.insert(order.request.base.clone()); + coins.insert(order.request.rel.clone()); + taker_orders.insert(order.request.uuid, order); } Ok(coins) From 7cd60f813fd6119b150c24b86b7f06022a299e8f 
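The `mm_ctx.rs` hunks above (still carrying TODOs for migrations) open one rusqlite connection per database id and hand out `Arc<Mutex<Connection>>` clones. A self-contained sketch of the get-or-open cache these helpers are building toward; the pool struct, field names and file layout are illustrative, while `Connection::open` is used as in the patch:

use rusqlite::Connection;
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};

type ConnArc = Arc<Mutex<Connection>>;

// Illustrative pool keyed by account db id (e.g. an rmd160 hex of the account pubkey).
struct SqliteConnPool {
    connections: Mutex<HashMap<String, ConnArc>>,
}

impl SqliteConnPool {
    fn connection_for(&self, db_root: &Path, db_id: &str) -> rusqlite::Result<ConnArc> {
        let mut connections = self.connections.lock().unwrap();
        if let Some(conn) = connections.get(db_id) {
            // Reuse the connection that was already opened for this account.
            return Ok(conn.clone());
        }
        // Illustrative path layout: one SQLite file per account id under the db root.
        let path = db_root.join(db_id).join("MM2.db");
        let conn = Arc::new(Mutex::new(Connection::open(path)?));
        connections.insert(db_id.to_string(), conn.clone());
        // The patch leaves running migrations for a freshly opened database as a TODO.
        Ok(conn)
    }
}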
Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 29 Apr 2024 16:21:51 +0100 Subject: [PATCH 075/186] minor changes/improvements --- mm2src/mm2_main/src/database.rs | 45 +++++++++++++--------------- mm2src/mm2_main/src/lp_ordermatch.rs | 34 ++++++++++----------- 2 files changed, 37 insertions(+), 42 deletions(-) diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 267fd1d35f..28e63b3404 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -7,7 +7,6 @@ pub mod my_orders; #[path = "database/stats_swaps.rs"] pub mod stats_swaps; use crate::CREATE_MY_SWAPS_TABLE; -use coins::find_unique_account_ids_any; use common::log::{debug, error, info}; use db_common::sqlite::run_optimization_pragmas; use db_common::sqlite::rusqlite::{params_from_iter, Result as SqlResult}; @@ -33,7 +32,7 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes "Current migration is {}, skipping the init, trying to migrate", current_migration ); - migrate_sqlite_database(ctx, current_migration).await?; + migrate_sqlite_database(ctx, current_migration, db_id).await?; return Ok(()); } }, @@ -46,7 +45,7 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes info!("Trying to initialize the SQLite database"); init_db(ctx, db_id)?; - migrate_sqlite_database(ctx, 1).await?; + migrate_sqlite_database(ctx, 1, db_id).await?; info!("SQLite database initialization is successful"); Ok(()) } @@ -144,29 +143,27 @@ async fn statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option } } -pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64) -> SqlResult<()> { - let db_ids = find_unique_account_ids_any(ctx).await.expect("successful coin find"); - for db_id in db_ids { - let mut current_migration = current_migration; - info!("migrate_sqlite_database for db_id=({db_id}), current migration {current_migration}"); - while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { - // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, - // so we can't create a transaction outside of this loop. - let conn = ctx.sqlite_connection(Some(&db_id)); - let conn = conn.lock().unwrap(); - let transaction = conn.unchecked_transaction()?; - for (statement, params) in statements_with_params { - debug!("Executing SQL statement {statement:?} with params {params:?} for db_id: {db_id}"); - transaction.execute(statement, params_from_iter(params.iter()))?; - } - current_migration += 1; - transaction.execute("INSERT INTO migration (current_migration) VALUES (?1);", [ - current_migration, - ])?; - transaction.commit()?; +pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64, db_id: Option<&str>) -> SqlResult<()> { + let mut current_migration = current_migration; + info!("migrate_sqlite_database current migration {current_migration}"); + while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { + // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, + // so we can't create a transaction outside of this loop. 
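In this hunk `migrate_sqlite_database` gains a `db_id` parameter and applies each pending migration step on that database's connection inside its own transaction, recording the new version in the `migration` table afterwards. A reduced sketch of that loop for a single already-opened connection; the sample statement is invented, while `unchecked_transaction`, `params_from_iter` and the version bump follow the hunk:

use rusqlite::{params_from_iter, Connection, Result as SqlResult};

// Stand-in for `statements_for_migration`: the SQL (with params) for one migration step, if any.
fn statements_for_migration(version: i64) -> Option<Vec<(String, Vec<String>)>> {
    match version {
        1 => Some(vec![(
            "CREATE TABLE IF NOT EXISTS my_swaps (uuid TEXT PRIMARY KEY);".to_string(),
            vec![],
        )]),
        _ => None,
    }
}

// Assumes a `migration (current_migration)` table already exists, as the INSERT in the hunk implies.
fn migrate(conn: &Connection, mut current_migration: i64) -> SqlResult<()> {
    while let Some(statements_with_params) = statements_for_migration(current_migration) {
        // One transaction per migration step, committed together with the version bump.
        let transaction = conn.unchecked_transaction()?;
        for (statement, params) in statements_with_params {
            transaction.execute(&statement, params_from_iter(params.iter()))?;
        }
        current_migration += 1;
        transaction.execute("INSERT INTO migration (current_migration) VALUES (?1);", [
            current_migration,
        ])?;
        transaction.commit()?;
    }
    Ok(())
}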
+ let conn = ctx.sqlite_connection(db_id); + let conn = conn.lock().unwrap(); + let transaction = conn.unchecked_transaction()?; + for (statement, params) in statements_with_params { + debug!("Executing SQL statement {statement:?} with params {params:?}"); + transaction.execute(statement, params_from_iter(params.iter()))?; } - info!("migrate_sqlite_database complete for db_id=({db_id}), migrated to {current_migration}"); + info!("migrate_sqlite_database complete, migrated to {current_migration}"); + current_migration += 1; + transaction.execute("INSERT INTO migration (current_migration) VALUES (?1);", [ + current_migration, + ])?; + transaction.commit()?; } + info!("migrate_sqlite_database complete migrated to {current_migration}"); Ok(()) } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index c3abdec0cb..f63e6edd99 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -4948,7 +4948,7 @@ struct OrderForRpcWithCancellationReason<'a> { pub async fn order_status(ctx: MmArc, req: Json) -> Result>, String> { let req: OrderStatusReq = try_s!(json::from_value(req)); - let db_ids = find_unique_account_ids_active(&ctx).await?; + let db_id: Option = None; // TODO let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(&ctx)); let maybe_order_mutex = ordermatch_ctx.maker_orders_ctx.lock().get_order(&req.uuid).cloned(); @@ -4974,23 +4974,21 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St .map_err(|e| ERRL!("{}", e)); } - for db_id in db_ids { - let storage = MyOrdersStorage::new(ctx.clone(), Some(db_id.clone())); - if let (Ok(order), Ok(cancellation_reason)) = ( - storage.load_order_from_history(req.uuid).await, - &storage.select_order_status(req.uuid).await, - ) { - info!("Order with UUID=({}) found for db_id=({db_id})", req.uuid); - let res = json!(OrderForRpcWithCancellationReason { - order: OrderForRpc::from(&order), - cancellation_reason, - }); + let storage = MyOrdersStorage::new(ctx.clone(), db_id.as_deref()); + if let (Ok(order), Ok(cancellation_reason)) = ( + storage.load_order_from_history(req.uuid).await, + &storage.select_order_status(req.uuid).await, + ) { + info!("Order with UUID=({})", req.uuid); + let res = json!(OrderForRpcWithCancellationReason { + order: OrderForRpc::from(&order), + cancellation_reason, + }); - return Response::builder() - .body(json::to_vec(&res).expect("Serialization failed")) - .map_err(|e| ERRL!("{}", e)); - }; - } + return Response::builder() + .body(json::to_vec(&res).expect("Serialization failed")) + .map_err(|e| ERRL!("{}", e)); + }; Err("No orders found across databases".to_string()) } @@ -5421,7 +5419,7 @@ pub struct HistoricalOrder { pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { let mut coins = HashSet::new(); let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(ctx)); - let db_id: Option<&str> = None; // TODO + let db_id: Option = None; // TODO let storage = MyOrdersStorage::new(ctx.clone(), db_id); let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); From b03a0aa6827e20c39ebd7ead11dcbf6f185393a3 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 30 Apr 2024 11:40:02 +0100 Subject: [PATCH 076/186] improve and refactor async ocnnection management logic --- mm2src/coins/nft/nft_structs.rs | 40 +++++---- mm2src/coins/nft/storage/sql_storage.rs | 82 ++++++++++--------- mm2src/db_common/src/lib.rs | 16 ++++ mm2src/mm2_core/src/mm_ctx.rs 
| 14 ++-- .../src/account/storage/sqlite_storage.rs | 6 +- mm2src/mm2_main/src/lp_ordermatch.rs | 2 +- .../src/rpc/lp_commands/lp_commands_legacy.rs | 9 +- mm2src/mm2_test_helpers/src/for_tests.rs | 79 +++++++++--------- 8 files changed, 134 insertions(+), 114 deletions(-) diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index ed34255a79..e03804aeb7 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -23,8 +23,9 @@ use crate::{TransactionType, TxFeeDetails, WithdrawFee}; cfg_native! { use db_common::async_sql_conn::AsyncConnection; + use db_common::AsyncConnectionCtx; use futures::lock::Mutex as AsyncMutex; - use mm2_core::mm_ctx::{AsyncSqliteConnectionArc, log_sqlite_file_open_attempt, ASYNC_SQLITE_DB_ID}; + use mm2_core::mm_ctx::{log_sqlite_file_open_attempt, ASYNC_SQLITE_DB_ID}; } cfg_wasm32! { @@ -720,9 +721,6 @@ impl From for TransferMeta { } } -#[cfg(not(target_arch = "wasm32"))] -pub struct NftCacheDbSql(pub AsyncConnection); - /// The primary context for NFT operations within the MM environment. /// /// This struct provides an interface for interacting with the underlying data structures @@ -733,7 +731,7 @@ pub(crate) struct NftCtx { #[cfg(target_arch = "wasm32")] pub(crate) nft_cache_db: SharedDb, #[cfg(not(target_arch = "wasm32"))] - pub(crate) nft_cache_dbs: Arc>>, + pub(crate) nft_cache_dbs: Arc>, #[cfg(not(target_arch = "wasm32"))] ctx: MmArc, } @@ -771,23 +769,23 @@ impl NftCtx { db_id: Option<&str>, ) -> MmResult { let db_id = db_id.map(|d| d.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let mut connections = self.nft_cache_dbs.lock().await; - if let Some(async_conn) = connections.get(&db_id) { - let conn = NftCacheDbSql(async_conn.lock().await.clone()); - Ok(conn) - } else { - let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); - log_sqlite_file_open_attempt(&sqlite_file_path); - let async_conn = Arc::new(AsyncMutex::new( - AsyncConnection::open(sqlite_file_path) - .await - .map_to_mm(|e| LockDBError::InternalError(e.to_string()))?, - )); - connections.insert(db_id, async_conn.clone()); - - let conn = NftCacheDbSql(async_conn.lock().await.clone()); - Ok(conn) + let mut connection = self.nft_cache_dbs.lock().await; + + // check if existing connection db_id is same as requested db and return the connection. + if db_id == connection.db_id { + return Ok(connection); } + + // else create and return new connection. 
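`lock_db` in the hunk above replaces the earlier map of per-account NFT cache connections with a single shared `AsyncConnectionCtx`: if the guard already holds the requested `db_id` it is returned as-is, otherwise the SQLite file for that id is opened and swapped into the same slot, as the next lines of the hunk show. A synchronous, self-contained analogue of that reuse-or-swap behaviour, with std's `Mutex` and a string standing in for `AsyncMutex`/`AsyncConnection`:

use std::sync::{Mutex, MutexGuard};

// Stand-in for db_common::AsyncConnectionCtx { db_id, connection }.
struct ConnectionCtx {
    db_id: String,
    connection: String, // placeholder for the real AsyncConnection
}

struct NftCacheDb {
    inner: Mutex<ConnectionCtx>,
}

impl NftCacheDb {
    fn lock_db(&self, requested_db_id: &str) -> MutexGuard<'_, ConnectionCtx> {
        let mut guard = self.inner.lock().unwrap();
        // Reuse the live connection if it already belongs to the requested account.
        if guard.db_id == requested_db_id {
            return guard;
        }
        // Otherwise "open" a connection for the new account and swap it in place,
        // so at most one NFT cache connection is held at a time.
        guard.connection = format!("connection to {requested_db_id}/KOMODEFI.db");
        guard.db_id = requested_db_id.to_string();
        guard
    }
}

The trade-off visible in the diff: callers that alternate between accounts will reopen the file each time, in exchange for never holding more than one cached NFT connection.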
+ let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); + log_sqlite_file_open_attempt(&sqlite_file_path); + let async_conn = AsyncConnection::open(sqlite_file_path) + .await + .map_to_mm(|e| LockDBError::InternalError(e.to_string()))?; + connection.connection = async_conn; + connection.db_id = db_id; + + Ok(connection) } #[cfg(target_arch = "wasm32")] diff --git a/mm2src/coins/nft/storage/sql_storage.rs b/mm2src/coins/nft/storage/sql_storage.rs index 9229ec1856..fcff626a00 100644 --- a/mm2src/coins/nft/storage/sql_storage.rs +++ b/mm2src/coins/nft/storage/sql_storage.rs @@ -1,7 +1,7 @@ use crate::nft::eth_addr_to_hex; -use crate::nft::nft_structs::{Chain, ContractType, ConvertChain, Nft, NftCacheDbSql, NftCommon, NftList, - NftListFilters, NftTokenAddrId, NftTransferCommon, NftTransferHistory, - NftTransferHistoryFilters, NftsTransferHistoryList, TransferMeta, UriMeta}; +use crate::nft::nft_structs::{Chain, ContractType, ConvertChain, Nft, NftCommon, NftList, NftListFilters, + NftTokenAddrId, NftTransferCommon, NftTransferHistory, NftTransferHistoryFilters, + NftsTransferHistoryList, TransferMeta, UriMeta}; use crate::nft::storage::{get_offset_limit, NftDetailsJson, NftListStorageOps, NftStorageError, NftTransferHistoryStorageOps, RemoveNftResult, TransferDetailsJson}; use async_trait::async_trait; @@ -11,7 +11,9 @@ use db_common::sqlite::rusqlite::types::{FromSqlError, Type}; use db_common::sqlite::rusqlite::{Connection, Error as SqlError, Result as SqlResult, Row, Statement}; use db_common::sqlite::sql_builder::SqlBuilder; use db_common::sqlite::{query_single_row, string_from_row, SafeTableName, CHECK_TABLE_EXISTS_SQL}; +use db_common::AsyncConnectionCtx; use ethereum_types::Address; +use futures_util::lock::MutexGuard; use mm2_err_handle::prelude::*; use mm2_number::{BigDecimal, BigUint}; use serde_json::Value as Json; @@ -546,12 +548,12 @@ fn is_table_empty(conn: &Connection, safe_table_name: SafeTableName) -> Result { type Error = AsyncConnError; async fn init(&self, chain: &Chain) -> MmResult<(), Self::Error> { let sql_nft_list = create_nft_list_table_sql(chain)?; - self.0 + self.connection .call(move |conn| { conn.execute(&sql_nft_list, []).map(|_| ())?; conn.execute(&create_scanned_nft_blocks_sql()?, []).map(|_| ())?; @@ -563,7 +565,7 @@ impl NftListStorageOps for NftCacheDbSql { async fn is_initialized(&self, chain: &Chain) -> MmResult { let table_name = chain.nft_list_table_name()?; - self.0 + self.connection .call(move |conn| { let nft_list_initialized = query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; @@ -587,7 +589,7 @@ impl NftListStorageOps for NftCacheDbSql { page_number: Option, filters: Option, ) -> MmResult { - self.0 + self.connection .call(move |conn| { let sql_builder = get_nft_list_builder_preimage(chains, filters)?; let total_count_builder_sql = sql_builder @@ -622,7 +624,7 @@ impl NftListStorageOps for NftCacheDbSql { I: IntoIterator + Send + 'static, I::IntoIter: Send, { - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; @@ -681,7 +683,7 @@ impl NftListStorageOps for NftCacheDbSql { token_id: BigUint, ) -> MmResult, Self::Error> { let table_name = chain.nft_list_table_name()?; - self.0 + self.connection .call(move |conn| { let sql = format!( "SELECT * FROM {} WHERE token_address=?1 AND token_id=?2", @@ -706,7 +708,7 @@ impl NftListStorageOps for NftCacheDbSql { let sql = delete_nft_sql(table_name)?; let params = [token_address, token_id.to_string()]; let 
scanned_block_params = [chain.to_ticker().to_string(), scanned_block.to_string()]; - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let rows_num = sql_transaction.execute(&sql, params)?; @@ -736,7 +738,7 @@ impl NftListStorageOps for NftCacheDbSql { table_name.inner() ); let params = [token_address, token_id.to_string()]; - self.0 + self.connection .call(move |conn| { let amount = query_single_row(conn, &sql, params, nft_amount_from_row)?; Ok(amount) @@ -747,7 +749,7 @@ impl NftListStorageOps for NftCacheDbSql { async fn refresh_nft_metadata(&self, chain: &Chain, nft: Nft) -> MmResult<(), Self::Error> { let sql = refresh_nft_metadata_sql(chain)?; - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let params = [ @@ -785,7 +787,7 @@ impl NftListStorageOps for NftCacheDbSql { async fn get_last_block_number(&self, chain: &Chain) -> MmResult, Self::Error> { let table_name = chain.nft_list_table_name()?; let sql = select_last_block_number_sql(table_name)?; - self.0 + self.connection .call(move |conn| { let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; Ok(block_number) @@ -799,7 +801,7 @@ impl NftListStorageOps for NftCacheDbSql { async fn get_last_scanned_block(&self, chain: &Chain) -> MmResult, Self::Error> { let sql = select_last_scanned_block_sql()?; let params = [chain.to_ticker()]; - self.0 + self.connection .call(move |conn| { let block_number = query_single_row(conn, &sql, params, block_number_from_row)?; Ok(block_number) @@ -817,7 +819,7 @@ impl NftListStorageOps for NftCacheDbSql { table_name.inner() ); let scanned_block_params = [chain.to_ticker().to_string(), scanned_block.to_string()]; - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let params = [ @@ -841,7 +843,7 @@ impl NftListStorageOps for NftCacheDbSql { table_name.inner() ); let scanned_block_params = [chain.to_ticker().to_string(), nft.block_number.to_string()]; - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let params = [ @@ -860,7 +862,7 @@ impl NftListStorageOps for NftCacheDbSql { } async fn get_nfts_by_token_address(&self, chain: Chain, token_address: String) -> MmResult, Self::Error> { - self.0 + self.connection .call(move |conn| { let table_name = chain.nft_list_table_name()?; let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; @@ -884,7 +886,7 @@ impl NftListStorageOps for NftCacheDbSql { "UPDATE {} SET possible_spam = ?1 WHERE token_address = ?2;", table_name.inner() ); - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; @@ -898,7 +900,7 @@ impl NftListStorageOps for NftCacheDbSql { async fn get_animation_external_domains(&self, chain: &Chain) -> MmResult, Self::Error> { let safe_table_name = chain.nft_list_table_name()?; - self.0 + self.connection .call(move |conn| { let table_name = safe_table_name.inner(); let sql_query = format!( @@ -928,7 +930,7 @@ impl NftListStorageOps for NftCacheDbSql { OR image_domain = ?2 OR animation_domain = ?2 OR external_domain = ?2;", table_name.inner() ); - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; @@ -946,7 +948,7 @@ impl NftListStorageOps for NftCacheDbSql { let table_scanned_blocks = scanned_nft_blocks_table_name()?; let 
sql_scanned_block = format!("DELETE from {} where chain=?1", table_scanned_blocks.inner()); let scanned_block_param = [chain.to_ticker()]; - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; sql_transaction.execute(&sql_nft, [])?; @@ -963,7 +965,7 @@ impl NftListStorageOps for NftCacheDbSql { } async fn clear_all_nft_data(&self) -> MmResult<(), Self::Error> { - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; for chain in Chain::variant_list().into_iter() { @@ -981,12 +983,12 @@ impl NftListStorageOps for NftCacheDbSql { } #[async_trait] -impl NftTransferHistoryStorageOps for NftCacheDbSql { +impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { type Error = AsyncConnError; async fn init(&self, chain: &Chain) -> MmResult<(), Self::Error> { let sql_transfer_history = create_transfer_history_table_sql(chain)?; - self.0 + self.connection .call(move |conn| { conn.execute(&sql_transfer_history, []).map(|_| ())?; Ok(()) @@ -997,7 +999,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { async fn is_initialized(&self, chain: &Chain) -> MmResult { let table_name = chain.transfer_history_table_name()?; - self.0 + self.connection .call(move |conn| { let nft_list_initialized = query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; @@ -1015,7 +1017,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { page_number: Option, filters: Option, ) -> MmResult { - self.0 + self.connection .call(move |conn| { let sql_builder = get_nft_transfer_builder_preimage(chains, filters)?; let total_count_builder_sql = sql_builder @@ -1050,7 +1052,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { I: IntoIterator + Send + 'static, I::IntoIter: Send, { - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; for transfer in transfers { @@ -1099,7 +1101,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { async fn get_last_block_number(&self, chain: &Chain) -> MmResult, Self::Error> { let table_name = chain.transfer_history_table_name()?; let sql = select_last_block_number_sql(table_name)?; - self.0 + self.connection .call(move |conn| { let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; Ok(block_number) @@ -1115,7 +1117,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { chain: Chain, from_block: u64, ) -> MmResult, Self::Error> { - self.0 + self.connection .call(move |conn| { let mut stmt = get_transfers_from_block_statement(conn, &chain)?; let transfers = stmt @@ -1133,7 +1135,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { token_address: String, token_id: BigUint, ) -> MmResult, Self::Error> { - self.0 + self.connection .call(move |conn| { let mut stmt = get_transfers_by_token_addr_id_statement(conn, chain)?; let transfers = stmt @@ -1156,7 +1158,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { "SELECT * FROM {} WHERE transaction_hash=?1 AND log_index = ?2", table_name.inner() ); - self.0 + self.connection .call(move |conn| { let transfer = query_single_row( conn, @@ -1193,7 +1195,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { Some(transfer_meta.token_address), Some(transfer_meta.token_id.to_string()), ]; - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; sql_transaction.execute(&sql, params)?; @@ -1208,7 +1210,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { } async fn get_transfers_with_empty_meta(&self, chain: Chain) 
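A structural detail of the storage rewrite above: both NFT storage traits are now implemented directly on the guard type returned by `lock_db` (`MutexGuard<'_, AsyncConnectionCtx>` from the futures mutex), so callers operate on the locked connection without the old `NftCacheDbSql` wrapper. A minimal example of implementing a local trait on a mutex guard, using std's `Mutex` instead of the async one and invented names throughout:

use std::sync::{Mutex, MutexGuard};

struct ConnectionCtx {
    db_id: String,
}

// A local trait may be implemented for the foreign guard type directly.
trait CacheOps {
    fn describe(&self) -> String;
}

impl CacheOps for MutexGuard<'_, ConnectionCtx> {
    fn describe(&self) -> String {
        format!("NFT cache bound to db {}", self.db_id)
    }
}

fn main() {
    let db = Mutex::new(ConnectionCtx { db_id: "a1b2c3".into() });
    let guard = db.lock().unwrap();
    println!("{}", guard.describe());
}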
-> MmResult, Self::Error> { - self.0 + self.connection .call(move |conn| { let sql_builder = get_transfers_with_empty_meta_builder(conn, &chain)?; let token_addr_id_pair = sql_builder.query(token_address_id_from_row)?; @@ -1223,7 +1225,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { chain: Chain, token_address: String, ) -> MmResult, Self::Error> { - self.0 + self.connection .call(move |conn| { let table_name = chain.transfer_history_table_name()?; let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; @@ -1247,7 +1249,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { "UPDATE {} SET possible_spam = ?1 WHERE token_address = ?2;", table_name.inner() ); - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; @@ -1260,7 +1262,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { } async fn get_token_addresses(&self, chain: Chain) -> MmResult, Self::Error> { - self.0 + self.connection .call(move |conn| { let table_name = chain.transfer_history_table_name()?; let mut stmt = get_token_addresses_statement(conn, table_name)?; @@ -1275,7 +1277,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { async fn get_domains(&self, chain: &Chain) -> MmResult, Self::Error> { let safe_table_name = chain.transfer_history_table_name()?; - self.0 + self.connection .call(move |conn| { let table_name = safe_table_name.inner(); let sql_query = format!( @@ -1304,7 +1306,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { "UPDATE {} SET possible_phishing = ?1 WHERE token_domain = ?2 OR image_domain = ?2;", safe_table_name.inner() ); - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; @@ -1318,7 +1320,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { async fn clear_history_data(&self, chain: &Chain) -> MmResult<(), Self::Error> { let table_name = chain.transfer_history_table_name()?; - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; @@ -1330,7 +1332,7 @@ impl NftTransferHistoryStorageOps for NftCacheDbSql { } async fn clear_all_history_data(&self) -> MmResult<(), Self::Error> { - self.0 + self.connection .call(move |conn| { let sql_transaction = conn.transaction()?; for chain in Chain::variant_list().into_iter() { diff --git a/mm2src/db_common/src/lib.rs b/mm2src/db_common/src/lib.rs index c1806e3b97..d630d30ecd 100644 --- a/mm2src/db_common/src/lib.rs +++ b/mm2src/db_common/src/lib.rs @@ -22,3 +22,19 @@ pub mod sql_build { pub use crate::sql_update::SqlUpdate; pub use crate::sql_value::{FromQuoted, SqlValue, SqlValueOptional}; } + +#[cfg(not(target_arch = "wasm32"))] +use async_sql_conn::AsyncConnection; +#[cfg(not(target_arch = "wasm32"))] use rusqlite::Connection; + +#[cfg(not(target_arch = "wasm32"))] +pub struct AsyncConnectionCtx { + pub db_id: String, + pub connection: AsyncConnection, +} + +#[cfg(not(target_arch = "wasm32"))] +pub struct SyncConnectionCtx { + pub db_id: String, + pub connection: Connection, +} diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index b6ea8fb48f..2aaf7f32c9 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -26,6 +26,7 @@ cfg_wasm32! { } cfg_native! 
{ +use db_common::AsyncConnectionCtx; use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; use futures::lock::Mutex as AsyncMutex; @@ -42,8 +43,6 @@ const EXPORT_METRICS_INTERVAL: f64 = 5. * 60.; pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; pub const SYNC_SQLITE_DB_ID: &str = "MM2.db"; -#[cfg(not(target_arch = "wasm32"))] -pub type AsyncSqliteConnectionArc = Arc>; #[cfg(not(target_arch = "wasm32"))] pub type SyncSqliteConnectionArc = Arc>; @@ -131,7 +130,7 @@ pub struct MmCtx { pub shared_sqlite_conn: Constructible>>, /// asynchronous handle for rusqlite connection. #[cfg(not(target_arch = "wasm32"))] - pub async_sqlite_connection: Constructible>>>, + pub async_sqlite_connection: Constructible>>, pub mm_version: String, pub datetime: String, pub mm_init_ctx: Mutex>>, @@ -387,9 +386,12 @@ impl MmCtx { log_sqlite_file_open_attempt(&sqlite_file_path); let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); - let mut store = HashMap::new(); - store.insert(db_id, Arc::new(AsyncMutex::new(async_conn))); - try_s!(self.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(store)))); + try_s!(self + .async_sqlite_connection + .pin(Arc::new(AsyncMutex::new(AsyncConnectionCtx { + connection: async_conn, + db_id, + })))); Ok(()) } diff --git a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs index bf604ca5a0..2dbcc4cfd4 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs @@ -117,8 +117,10 @@ pub(crate) struct SqliteAccountStorage { impl SqliteAccountStorage { pub(crate) fn new(ctx: &MmArc) -> AccountStorageResult { // TODO db_id - let conn = ctx.sqlite_connection_res(None).map_to_mm(|_| { - AccountStorageError::Internal("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) + let conn = ctx.sqlite_conn_opt(None).ok_or_else(|| { + MmError::new(AccountStorageError::Internal( + "'MmCtx::sqlite_connection' is not found or initialized".to_owned(), + )) })?; Ok(SqliteAccountStorage { conn }) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index f63e6edd99..fdffe2194f 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -4974,7 +4974,7 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St .map_err(|e| ERRL!("{}", e)); } - let storage = MyOrdersStorage::new(ctx.clone(), db_id.as_deref()); + let storage = MyOrdersStorage::new(ctx.clone(), db_id.clone()); if let (Ok(order), Ok(cancellation_reason)) = ( storage.load_order_from_history(req.uuid).await, &storage.select_order_status(req.uuid).await, diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs index 46b10c2afb..34653f3088 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs @@ -249,12 +249,9 @@ pub async fn my_balance(ctx: MmArc, req: Json) -> Result>, Stri #[cfg(not(target_arch = "wasm32"))] async fn close_async_connection(ctx: &MmArc) { if let Some(connections) = ctx.async_sqlite_connection.as_option() { - let connections = connections.lock().await; - for connection in connections.values() { - let mut conn = connection.lock().await; - if let Err(e) = conn.close().await { - error!("Error stopping AsyncConnection: {}", e); - } + let mut conn = connections.lock().await; 
+ if let Err(e) = conn.connection.close().await { + error!("Error stopping AsyncConnection: {}", e); } } } diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index e8aeda02ef..32ba3ed79e 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -28,6 +28,7 @@ use std::num::NonZeroUsize; use std::process::Child; use std::sync::Mutex; use uuid::Uuid; +use db_common::AsyncConnectionCtx; cfg_native! { use common::block_on; @@ -416,10 +417,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let bob_passphrase = crate::get_passphrase!(".env.seed", "BOB_PASSPHRASE").unwrap(); Mm2TestConf::seednode(&bob_passphrase, coins) - }, + } Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::seednode_with_hd_account(Self::BOB_HD_PASSPHRASE, coins) - }, + } } } @@ -428,10 +429,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let alice_passphrase = crate::get_passphrase!(".env.client", "ALICE_PASSPHRASE").unwrap(); Mm2TestConf::light_node(&alice_passphrase, coins, &[bob_ip]) - }, + } Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::light_node_with_hd_account(Self::ALICE_HD_PASSPHRASE, coins, &[bob_ip]) - }, + } } } } @@ -1113,9 +1114,11 @@ pub async fn mm_ctx_with_custom_async_db() -> MmArc { let ctx = MmCtxBuilder::new().into_mm_arc(); let connection = AsyncConnection::open_in_memory().await.unwrap(); - let mut store = HashMap::new(); - store.insert(ctx.rmd160_hex(), Arc::new(AsyncMutex::new(connection))); - let _ = ctx.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(store))); + let connection = AsyncConnectionCtx { + connection: connection, + db_id: ctx.rmd160_hex(), + }; + let _ = ctx.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(connection))); ctx } @@ -1137,7 +1140,7 @@ impl RaiiKill { _ => { self.running = false; false - }, + } } } } @@ -1282,7 +1285,7 @@ impl MarketMakerIt { let dir = folder.join("DB"); conf["dbdir"] = dir.to_str().unwrap().into(); dir - }, + } }; try_s!(fs::create_dir(&folder)); @@ -1297,7 +1300,7 @@ impl MarketMakerIt { let path = folder.join("mm2.log"); conf["log"] = path.to_str().unwrap().into(); path - }, + } }; // If `local` is provided @@ -1389,8 +1392,8 @@ impl MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1416,8 +1419,8 @@ impl MarketMakerIt { /// after process is stopped #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log_after_stop(&self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { use common::try_or_ready_err; @@ -1430,19 +1433,19 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_ms(ms) - .with_timeout_secs(timeout_sec) - .await - .map_err(|e| ERRL!("{:?}", e)) - // Convert `Result, String>` to `Result<(), String>` - .flatten() + .repeat_every_ms(ms) + .with_timeout_secs(timeout_sec) + .await + .map_err(|e| ERRL!("{:?}", e)) + // Convert `Result, String>` to `Result<(), String>` + .flatten() } /// Busy-wait on the instance in-memory log until the `pred` returns `true` or `timeout_sec` expires. 
#[cfg(target_arch = "wasm32")] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { wait_for_log(&self.ctx, timeout_sec, pred).await } @@ -1466,7 +1469,7 @@ impl MarketMakerIt { let body_str = json::to_string(&body).unwrap_or_else(|_| panic!("Response {:?} is not a valid JSON", body)); Ok((status_code, body_str, HeaderMap::new())) - }, + } Err(e) => Ok((StatusCode::INTERNAL_SERVER_ERROR, e, HeaderMap::new())), } } @@ -1532,7 +1535,7 @@ impl MarketMakerIt { } else { return ERR!("{}", err); } - }, + } }; if status != StatusCode::OK { return ERR!("MM didn't accept a stop. body: {}", body); @@ -1555,10 +1558,10 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_secs(0.05) - .with_timeout_ms(timeout_ms) - .await - .map_err(|e| ERRL!("{:?}", e)) + .repeat_every_secs(0.05) + .with_timeout_ms(timeout_ms) + .await + .map_err(|e| ERRL!("{:?}", e)) } /// Currently, we cannot wait for the `Completed IAmrelay handling for peer` log entry on WASM node, @@ -1663,8 +1666,8 @@ impl Drop for MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. pub async fn wait_for_log(ctx: &MmArc, timeout_sec: f64, pred: F) -> Result<(), String> -where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1764,7 +1767,7 @@ pub fn mm_spat() -> (&'static str, MarketMakerIt, RaiiDump, RaiiDump) { "pass".into(), None, ) - .unwrap(); + .unwrap(); let (dump_log, dump_dashboard) = mm_dump(&mm.log_path); (passphrase, mm, dump_log, dump_dashboard) } @@ -1841,10 +1844,10 @@ pub fn from_env_file(env: Vec) -> (Option, Option) { match cap.get(1) { Some(name) if name.as_bytes() == b"PASSPHRASE" => { passphrase = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - }, + } Some(name) if name.as_bytes() == b"USERPASS" => { userpass = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - }, + } _ => (), } } @@ -2198,7 +2201,7 @@ pub async fn init_lightning_status(mm: &MarketMakerIt, task_id: u64) -> Json { pub fn new_mm2_temp_folder_path(ip: Option) -> PathBuf { let now = common::now_ms(); #[allow(deprecated)] - let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); + let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); let folder = match ip { Some(ip) => format!("mm2_{}_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f"), ip), None => format!("mm2_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f")), @@ -3302,7 +3305,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; log!("Checking maker status.."); check_my_swap_status( @@ -3311,7 +3314,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; } } @@ -3336,8 +3339,8 @@ pub async fn test_qrc20_history_impl(local_start: Option) { "pass".into(), local_start, ) - .await - .unwrap(); + .await + .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); #[cfg(not(target_arch = "wasm32"))] From 98cb9932ac23303e6b09df3a8f6afa0ff263a92f Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 30 Apr 2024 20:31:21 +0100 Subject: [PATCH 077/186] wip AccountContext --- .../account/storage/account_storage_tests.rs | 18 ++++---- .../src/account/storage/mod.rs | 22 +++++++--- 
.../src/account/storage/sqlite_storage.rs | 27 ++++++------ .../src/account/storage/wasm_storage.rs | 12 +++--- mm2src/mm2_gui_storage/src/context.rs | 23 ++++++++-- mm2src/mm2_gui_storage/src/rpc_commands.rs | 43 ++++++++++++------- mm2src/mm2_test_helpers/src/for_tests.rs | 1 - 7 files changed, 89 insertions(+), 57 deletions(-) diff --git a/mm2src/mm2_gui_storage/src/account/storage/account_storage_tests.rs b/mm2src/mm2_gui_storage/src/account/storage/account_storage_tests.rs index b44e83190a..4612a83314 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/account_storage_tests.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/account_storage_tests.rs @@ -70,7 +70,7 @@ async fn fill_storage(storage: &dyn AccountStorage, accounts: Vec) async fn test_init_collection_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); // repetitive init must not fail @@ -79,7 +79,7 @@ async fn test_init_collection_impl() { async fn test_upload_account_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); for account in accounts_for_test() { @@ -99,7 +99,7 @@ async fn test_upload_account_impl() { async fn test_enable_account_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); let error = storage @@ -151,7 +151,7 @@ async fn test_enable_account_impl() { async fn test_set_name_desc_balance_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); let accounts = accounts_for_test(); @@ -195,7 +195,7 @@ async fn test_set_name_desc_balance_impl() { async fn test_activate_deactivate_coins_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); let accounts = accounts_for_test(); @@ -287,7 +287,7 @@ async fn test_activate_deactivate_coins_impl() { async fn test_load_enabled_account_with_coins_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); let accounts = accounts_for_test(); @@ -357,7 +357,7 @@ async fn test_load_enabled_account_with_coins_impl() { async fn test_load_accounts_with_enabled_flag_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); let accounts = accounts_for_test(); @@ -406,7 +406,7 @@ async fn test_load_accounts_with_enabled_flag_impl() { async fn test_delete_account_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); let accounts = accounts_for_test(); @@ -464,7 +464,7 @@ async fn test_delete_account_impl() { async fn 
test_delete_account_clears_coins_impl() { let ctx = mm_ctx_with_custom_db(); - let storage = AccountStorageBuilder::new(&ctx).build().unwrap(); + let storage = AccountStorageBuilder::new(&ctx, None).build().unwrap(); storage.init().await.unwrap(); let accounts = accounts_for_test(); diff --git a/mm2src/mm2_gui_storage/src/account/storage/mod.rs b/mm2src/mm2_gui_storage/src/account/storage/mod.rs index a2a186d490..beb47b62a4 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/mod.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/mod.rs @@ -10,8 +10,10 @@ use std::error::Error as StdError; #[cfg(any(test, target_arch = "wasm32"))] mod account_storage_tests; -#[cfg(not(target_arch = "wasm32"))] mod sqlite_storage; -#[cfg(target_arch = "wasm32")] mod wasm_storage; +#[cfg(not(target_arch = "wasm32"))] +mod sqlite_storage; +#[cfg(target_arch = "wasm32")] +mod wasm_storage; const DEFAULT_ACCOUNT_IDX: u32 = 0; const DEFAULT_DEVICE_PUB: HwPubkey = HwPubkey::const_default(); @@ -87,7 +89,7 @@ impl AccountId { account_type, account_idx, device_pubkey ); MmError::err(AccountStorageError::ErrorDeserializing(error)) - }, + } } } } @@ -114,7 +116,7 @@ impl EnabledAccountId { (_, _) => { let error = format!("An invalid AccountId tuple: {:?}/{:?}", account_type, account_idx); MmError::err(AccountStorageError::ErrorDeserializing(error)) - }, + } } } } @@ -123,19 +125,25 @@ impl EnabledAccountId { /// The implementation depends on the target architecture. pub(crate) struct AccountStorageBuilder<'a> { ctx: &'a MmArc, + db_id: Option<&'a str>, } impl<'a> AccountStorageBuilder<'a> { - pub fn new(ctx: &'a MmArc) -> Self { AccountStorageBuilder { ctx } } + pub fn new(ctx: &'a MmArc, db_id: Option<&'a str>) -> Self { + AccountStorageBuilder { + ctx, + db_id, + } + } #[cfg(not(target_arch = "wasm32"))] pub fn build(self) -> AccountStorageResult { - sqlite_storage::SqliteAccountStorage::new(self.ctx).map(|storage| -> AccountStorageBoxed { Box::new(storage) }) + sqlite_storage::SqliteAccountStorage::new(self.ctx, self.db_id).map(|storage| -> AccountStorageBoxed { Box::new(storage) }) } #[cfg(target_arch = "wasm32")] pub fn build(self) -> AccountStorageResult { - Ok(Box::new(wasm_storage::WasmAccountStorage::new(self.ctx))) + Ok(Box::new(wasm_storage::WasmAccountStorage::new(self.ctx, self.db_id))) } } diff --git a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs index 2dbcc4cfd4..803063d7b2 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs @@ -69,7 +69,7 @@ impl From for AccountStorageError { | SqlError::InvalidColumnType(_, _, _) => AccountStorageError::ErrorDeserializing(error), SqlError::Utf8Error(_) | SqlError::NulError(_) | SqlError::ToSqlConversionFailure(_) => { AccountStorageError::ErrorSerializing(error) - }, + } _ => AccountStorageError::Internal(error), } } @@ -115,9 +115,8 @@ pub(crate) struct SqliteAccountStorage { } impl SqliteAccountStorage { - pub(crate) fn new(ctx: &MmArc) -> AccountStorageResult { - // TODO db_id - let conn = ctx.sqlite_conn_opt(None).ok_or_else(|| { + pub(crate) fn new(ctx: &MmArc, db_id: Option<&str>) -> AccountStorageResult { + let conn = ctx.sqlite_conn_opt(db_id).ok_or_else(|| { MmError::new(AccountStorageError::Internal( "'MmCtx::sqlite_connection' is not found or initialized".to_owned(), )) @@ -164,7 +163,7 @@ impl SqliteAccountStorage { account_coins_table::DEVICE_PUBKEY, 
SqlType::Varchar(DEVICE_PUBKEY_MAX_LENGTH), ) - .not_null(), + .not_null(), ) .column(SqlColumn::new(account_coins_table::COIN, SqlType::Varchar(MAX_TICKER_LENGTH)).not_null()) .constraint( @@ -173,8 +172,8 @@ impl SqliteAccountStorage { account_coins_table::ACCOUNT_IDX => account_table::ACCOUNT_IDX, account_coins_table::DEVICE_PUBKEY => account_table::DEVICE_PUBKEY ])? - // Delete all coins from `account_coins_table` if the corresponding `account_table` record has been deleted. - .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), + // Delete all coins from `account_coins_table` if the corresponding `account_table` record has been deleted. + .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), ) .constraint(Unique::new(account_coins_table::ACCOUNT_ID_COIN_CONSTRAINT, [ account_coins_table::ACCOUNT_TYPE, @@ -197,7 +196,7 @@ impl SqliteAccountStorage { enabled_account_table::DEVICE_PUBKEY, SqlType::Varchar(DEVICE_PUBKEY_MAX_LENGTH), ) - .not_null(), + .not_null(), ) .constraint( ForeignKey::new(foreign_key::ParentTable(account_table::TABLE_NAME), foreign_columns![ @@ -205,8 +204,8 @@ impl SqliteAccountStorage { enabled_account_table::ACCOUNT_IDX => account_table::ACCOUNT_IDX, enabled_account_table::DEVICE_PUBKEY => account_table::DEVICE_PUBKEY, ])? - // Delete an enabled account from `enabled_account_table` if the corresponding `account_table` record has been deleted. - .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), + // Delete an enabled account from `enabled_account_table` if the corresponding `account_table` record has been deleted. + .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), ); create_sql.create().map_to_mm(AccountStorageError::from) } @@ -348,8 +347,8 @@ impl SqliteAccountStorage { /// Updates the given `account_id` account by applying the `update_cb` callback to an `SqlUpdate` SQL builder. 
fn update_account(conn: &Connection, account_id: AccountId, update_cb: F) -> AccountStorageResult<()> - where - F: FnOnce(&mut SqlUpdate) -> SqlResult<()>, + where + F: FnOnce(&mut SqlUpdate) -> SqlResult<()>, { let mut sql_update = SqlUpdate::new(conn, account_table::TABLE_NAME)?; update_cb(&mut sql_update)?; @@ -594,8 +593,8 @@ fn bigdecimal_from_row(row: &Row<'_>, idx: usize) -> Result(result: SqlResult, on_constraint_error: F) -> AccountStorageResult -where - F: FnOnce() -> AccountStorageError, + where + F: FnOnce() -> AccountStorageError, { result.map_to_mm(|e| { if is_constraint_error(&e) { diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index a15a25c7fa..d2dac9d652 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -32,10 +32,10 @@ impl From for AccountStorageError { DbTransactionError::ErrorSerializingItem(_) => AccountStorageError::ErrorSerializing(desc), DbTransactionError::ErrorGettingItems(_) | DbTransactionError::ErrorCountingItems(_) => { AccountStorageError::ErrorLoading(desc) - }, + } DbTransactionError::ErrorUploadingItem(_) | DbTransactionError::ErrorDeletingItems(_) => { AccountStorageError::ErrorSaving(desc) - }, + } } } } @@ -61,7 +61,7 @@ pub(crate) struct WasmAccountStorage { } impl WasmAccountStorage { - pub fn new(ctx: &MmArc) -> Self { + pub fn new(ctx: &MmArc, _db_id: Option<&str>) -> Self { WasmAccountStorage { account_db: ConstructibleDb::new_shared_db(ctx).into_shared(), } @@ -166,8 +166,8 @@ impl WasmAccountStorage { /// Loads an account by `AccountId`, applies the given `f` function to it, /// and uploads changes to the storage. async fn update_account(&self, account_id: AccountId, f: F) -> AccountStorageResult<()> - where - F: FnOnce(&mut AccountTable), + where + F: FnOnce(&mut AccountTable), { let locked_db = self.lock_db_mutex().await?; let transaction = locked_db.inner.transaction().await?; @@ -331,7 +331,7 @@ impl AccountStorage for WasmAccountStorage { account.activated_coins.remove(ticker); } }) - .await + .await } } diff --git a/mm2src/mm2_gui_storage/src/context.rs b/mm2src/mm2_gui_storage/src/context.rs index 4a15f48541..00855c269e 100644 --- a/mm2src/mm2_gui_storage/src/context.rs +++ b/mm2src/mm2_gui_storage/src/context.rs @@ -4,16 +4,31 @@ use std::sync::Arc; pub(crate) struct AccountContext { storage: AccountStorageBoxed, + db_id: Option, } impl AccountContext { /// Obtains a reference to this crate context, creating it if necessary. - pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { - from_ctx(&ctx.account_ctx, move || { + pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + let account_context = from_ctx(&ctx.account_ctx, move || { Ok(AccountContext { - storage: AccountStorageBuilder::new(ctx).build().map_err(|e| e.to_string())?, + storage: AccountStorageBuilder::new(ctx, db_id).build().map_err(|e| e.to_string())?, + db_id: db_id.map(|e| e.to_string()), }) - }) + })?; + + if account_context.db_id.as_deref() != db_id { + let mut ctx_field = ctx.account_ctx.lock().unwrap(); + let account_context = Arc::new(AccountContext { + storage: AccountStorageBuilder::new(ctx, db_id).build().map_err(|e| e.to_string())?, + db_id: db_id.map(|e| e.to_string()), + }); + *ctx_field = Some(Arc::clone(&account_context) as Arc); + + return Ok(account_context); + }; + + Ok(account_context) } /// Initializes the storage and returns a reference to it. 
diff --git a/mm2src/mm2_gui_storage/src/rpc_commands.rs b/mm2src/mm2_gui_storage/src/rpc_commands.rs index 2c3363443c..f7632c4dc5 100644 --- a/mm2src/mm2_gui_storage/src/rpc_commands.rs +++ b/mm2src/mm2_gui_storage/src/rpc_commands.rs @@ -42,10 +42,10 @@ impl From for AccountRpcError { AccountStorageError::AccountExistsAlready(account_id) => AccountRpcError::AccountExistsAlready(account_id), AccountStorageError::ErrorDeserializing(e) | AccountStorageError::ErrorLoading(e) => { AccountRpcError::ErrorLoadingAccount(e) - }, + } AccountStorageError::ErrorSaving(e) | AccountStorageError::ErrorSerializing(e) => { AccountRpcError::ErrorSavingAccount(e) - }, + } AccountStorageError::Internal(internal) => AccountRpcError::Internal(internal), } } @@ -78,8 +78,8 @@ pub struct NewAccount { } impl From> for AccountInfo -where - AccountId: From, + where + AccountId: From, { fn from(orig: NewAccount) -> Self { AccountInfo { @@ -172,7 +172,8 @@ pub struct SetBalanceRequest { /// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. pub async fn enable_account(ctx: MmArc, req: EnableAccountRequest) -> MmResult { - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; let account_id = match req.policy { EnableAccountPolicy::Existing(account_id) => account_id, EnableAccountPolicy::New(new_account) => { @@ -183,7 +184,7 @@ pub async fn enable_account(ctx: MmArc, req: EnableAccountRequest) -> MmResult MmResult MmResult { validate_new_account(&req.account)?; - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; account_ctx .storage() .await? @@ -213,7 +215,8 @@ pub async fn add_account(ctx: MmArc, req: AddAccountRequest) -> MmResult MmResult { - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; account_ctx.storage().await?.delete_account(req.account_id).await?; Ok(SuccessResponse::new()) } @@ -228,7 +231,8 @@ pub async fn get_accounts( ctx: MmArc, _req: GetAccountsRequest, ) -> MmResult, AccountRpcError> { - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; let accounts = account_ctx .storage() .await? @@ -249,7 +253,8 @@ pub async fn get_account_coins( ctx: MmArc, req: GetAccountCoinsRequest, ) -> MmResult { - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; let coins = account_ctx .storage() .await? @@ -271,7 +276,8 @@ pub async fn get_enabled_account( ctx: MmArc, _req: GetEnabledAccountRequest, ) -> MmResult { - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; let account = account_ctx.storage().await?.load_enabled_account_with_coins().await?; Ok(account) } @@ -279,7 +285,8 @@ pub async fn get_enabled_account( /// Sets the account name. 
pub async fn set_account_name(ctx: MmArc, req: SetAccountNameRequest) -> MmResult { validate_account_name(&req.name)?; - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; account_ctx.storage().await?.set_name(req.account_id, req.name).await?; Ok(SuccessResponse::new()) } @@ -290,7 +297,8 @@ pub async fn set_account_description( req: SetAccountDescriptionRequest, ) -> MmResult { validate_account_desc(&req.description)?; - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; account_ctx .storage() .await? @@ -305,7 +313,8 @@ pub async fn set_account_description( /// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. pub async fn set_account_balance(ctx: MmArc, req: SetBalanceRequest) -> MmResult { - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; account_ctx .storage() .await? @@ -321,7 +330,8 @@ pub async fn set_account_balance(ctx: MmArc, req: SetBalanceRequest) -> MmResult /// This RPC affects the storage **only**. It doesn't affect MarketMaker. pub async fn activate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { validate_tickers(&req.tickers)?; - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; account_ctx .storage() .await? @@ -336,7 +346,8 @@ pub async fn activate_coins(ctx: MmArc, req: CoinRequest) -> MmResult MmResult { - let account_ctx = AccountContext::from_ctx(&ctx).map_to_mm(AccountRpcError::Internal)?; + // TODO db_id + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; account_ctx .storage() .await? diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 32ba3ed79e..5bc8338e51 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -28,7 +28,6 @@ use std::num::NonZeroUsize; use std::process::Child; use std::sync::Mutex; use uuid::Uuid; -use db_common::AsyncConnectionCtx; cfg_native! 
{ use common::block_on; From 061d1ffcd56a46f12fff9daf00a64113d020bd8c Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 30 Apr 2024 20:41:18 +0100 Subject: [PATCH 078/186] minor changes --- mm2src/coins/lightning/ln_utils.rs | 2 +- .../sql_tx_history_storage_v2.rs | 6 +++-- .../utxo/utxo_block_header_storage/mod.rs | 4 ++-- mm2src/mm2_core/src/mm_ctx.rs | 23 ------------------- .../src/account/storage/mod.rs | 20 ++++++---------- .../src/account/storage/sqlite_storage.rs | 22 +++++++++--------- .../src/account/storage/wasm_storage.rs | 10 ++++---- mm2src/mm2_gui_storage/src/context.rs | 8 +++++-- mm2src/mm2_gui_storage/src/rpc_commands.rs | 10 ++++---- 9 files changed, 41 insertions(+), 64 deletions(-) diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 08085ff3a3..65640ff7db 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -69,7 +69,7 @@ pub async fn init_persister( } pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult { - let shared = ctx.sqlite_connection_res(None).map_to_mm(|_| { + let shared = ctx.sqlite_conn_opt(None).or_mm_err(|| { EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) })?; diff --git a/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs b/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs index fd2f405d56..3246836a1a 100644 --- a/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs +++ b/mm2src/coins/tx_history_storage/sql_tx_history_storage_v2.rs @@ -376,8 +376,10 @@ pub struct SqliteTxHistoryStorage(Arc>); impl SqliteTxHistoryStorage { pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Result> { - let sqlite_connection = ctx.sqlite_connection_res(db_id).map_to_mm(|_| { - CreateTxHistoryStorageError::Internal("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) + let sqlite_connection = ctx.sqlite_conn_opt(db_id).ok_or_else(|| { + MmError::new(CreateTxHistoryStorageError::Internal( + "'MmCtx::sqlite_connection' is not found or initialized".to_owned(), + )) })?; Ok(SqliteTxHistoryStorage(sqlite_connection)) diff --git a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs index 0ba262948b..5c9ad0bc2a 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/mod.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/mod.rs @@ -34,8 +34,8 @@ impl BlockHeaderStorage { db_id: Option<&str>, ) -> Result { let sqlite_connection = ctx - .sqlite_connection_res(db_id) - .map_err(|_| BlockHeaderStorageError::Internal("sqlite_connection is not initialized".to_owned()))?; + .sqlite_conn_opt(db_id) + .ok_or_else(|| BlockHeaderStorageError::Internal("sqlite_connection is not initialized".to_owned()))?; Ok(BlockHeaderStorage { inner: Box::new(SqliteBlockHeadersStorage { diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 2aaf7f32c9..f8be7d0cc6 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -447,29 +447,6 @@ impl MmCtx { }; } - #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_connection_res(&self, db_id: Option<&str>) -> Result { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); - let connections = self - .sqlite_connection - .ok_or("sqlite_connection is not initialized".to_string())? 
- .lock() - .unwrap(); - if let Some(connection) = connections.get(&db_id) { - Ok(connection.clone()) - } else { - let sqlite_file_path = self.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID); - log_sqlite_file_open_attempt(&sqlite_file_path); - - let connection = Arc::new(Mutex::new(try_s!(Connection::open(sqlite_file_path)))); - let mut store = HashMap::new(); - store.insert(db_id, connection.clone()); - drop(connections); - // TODO: run migration and fix directions - Ok(connection) - } - } - #[cfg(not(target_arch = "wasm32"))] pub fn init_sqlite_connection_for_test(&self, db_id: Option<&str>) -> Result { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); diff --git a/mm2src/mm2_gui_storage/src/account/storage/mod.rs b/mm2src/mm2_gui_storage/src/account/storage/mod.rs index beb47b62a4..73deb9a698 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/mod.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/mod.rs @@ -10,10 +10,8 @@ use std::error::Error as StdError; #[cfg(any(test, target_arch = "wasm32"))] mod account_storage_tests; -#[cfg(not(target_arch = "wasm32"))] -mod sqlite_storage; -#[cfg(target_arch = "wasm32")] -mod wasm_storage; +#[cfg(not(target_arch = "wasm32"))] mod sqlite_storage; +#[cfg(target_arch = "wasm32")] mod wasm_storage; const DEFAULT_ACCOUNT_IDX: u32 = 0; const DEFAULT_DEVICE_PUB: HwPubkey = HwPubkey::const_default(); @@ -89,7 +87,7 @@ impl AccountId { account_type, account_idx, device_pubkey ); MmError::err(AccountStorageError::ErrorDeserializing(error)) - } + }, } } } @@ -116,7 +114,7 @@ impl EnabledAccountId { (_, _) => { let error = format!("An invalid AccountId tuple: {:?}/{:?}", account_type, account_idx); MmError::err(AccountStorageError::ErrorDeserializing(error)) - } + }, } } } @@ -129,16 +127,12 @@ pub(crate) struct AccountStorageBuilder<'a> { } impl<'a> AccountStorageBuilder<'a> { - pub fn new(ctx: &'a MmArc, db_id: Option<&'a str>) -> Self { - AccountStorageBuilder { - ctx, - db_id, - } - } + pub fn new(ctx: &'a MmArc, db_id: Option<&'a str>) -> Self { AccountStorageBuilder { ctx, db_id } } #[cfg(not(target_arch = "wasm32"))] pub fn build(self) -> AccountStorageResult { - sqlite_storage::SqliteAccountStorage::new(self.ctx, self.db_id).map(|storage| -> AccountStorageBoxed { Box::new(storage) }) + sqlite_storage::SqliteAccountStorage::new(self.ctx, self.db_id) + .map(|storage| -> AccountStorageBoxed { Box::new(storage) }) } #[cfg(target_arch = "wasm32")] diff --git a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs index 803063d7b2..b6618cc896 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/sqlite_storage.rs @@ -69,7 +69,7 @@ impl From for AccountStorageError { | SqlError::InvalidColumnType(_, _, _) => AccountStorageError::ErrorDeserializing(error), SqlError::Utf8Error(_) | SqlError::NulError(_) | SqlError::ToSqlConversionFailure(_) => { AccountStorageError::ErrorSerializing(error) - } + }, _ => AccountStorageError::Internal(error), } } @@ -163,7 +163,7 @@ impl SqliteAccountStorage { account_coins_table::DEVICE_PUBKEY, SqlType::Varchar(DEVICE_PUBKEY_MAX_LENGTH), ) - .not_null(), + .not_null(), ) .column(SqlColumn::new(account_coins_table::COIN, SqlType::Varchar(MAX_TICKER_LENGTH)).not_null()) .constraint( @@ -172,8 +172,8 @@ impl SqliteAccountStorage { account_coins_table::ACCOUNT_IDX => account_table::ACCOUNT_IDX, account_coins_table::DEVICE_PUBKEY => account_table::DEVICE_PUBKEY 
])? - // Delete all coins from `account_coins_table` if the corresponding `account_table` record has been deleted. - .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), + // Delete all coins from `account_coins_table` if the corresponding `account_table` record has been deleted. + .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), ) .constraint(Unique::new(account_coins_table::ACCOUNT_ID_COIN_CONSTRAINT, [ account_coins_table::ACCOUNT_TYPE, @@ -196,7 +196,7 @@ impl SqliteAccountStorage { enabled_account_table::DEVICE_PUBKEY, SqlType::Varchar(DEVICE_PUBKEY_MAX_LENGTH), ) - .not_null(), + .not_null(), ) .constraint( ForeignKey::new(foreign_key::ParentTable(account_table::TABLE_NAME), foreign_columns![ @@ -204,8 +204,8 @@ impl SqliteAccountStorage { enabled_account_table::ACCOUNT_IDX => account_table::ACCOUNT_IDX, enabled_account_table::DEVICE_PUBKEY => account_table::DEVICE_PUBKEY, ])? - // Delete an enabled account from `enabled_account_table` if the corresponding `account_table` record has been deleted. - .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), + // Delete an enabled account from `enabled_account_table` if the corresponding `account_table` record has been deleted. + .on_event(foreign_key::Event::OnDelete, foreign_key::Action::Cascade), ); create_sql.create().map_to_mm(AccountStorageError::from) } @@ -347,8 +347,8 @@ impl SqliteAccountStorage { /// Updates the given `account_id` account by applying the `update_cb` callback to an `SqlUpdate` SQL builder. fn update_account(conn: &Connection, account_id: AccountId, update_cb: F) -> AccountStorageResult<()> - where - F: FnOnce(&mut SqlUpdate) -> SqlResult<()>, + where + F: FnOnce(&mut SqlUpdate) -> SqlResult<()>, { let mut sql_update = SqlUpdate::new(conn, account_table::TABLE_NAME)?; update_cb(&mut sql_update)?; @@ -593,8 +593,8 @@ fn bigdecimal_from_row(row: &Row<'_>, idx: usize) -> Result(result: SqlResult, on_constraint_error: F) -> AccountStorageResult - where - F: FnOnce() -> AccountStorageError, +where + F: FnOnce() -> AccountStorageError, { result.map_to_mm(|e| { if is_constraint_error(&e) { diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index d2dac9d652..13853e25d3 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -32,10 +32,10 @@ impl From for AccountStorageError { DbTransactionError::ErrorSerializingItem(_) => AccountStorageError::ErrorSerializing(desc), DbTransactionError::ErrorGettingItems(_) | DbTransactionError::ErrorCountingItems(_) => { AccountStorageError::ErrorLoading(desc) - } + }, DbTransactionError::ErrorUploadingItem(_) | DbTransactionError::ErrorDeletingItems(_) => { AccountStorageError::ErrorSaving(desc) - } + }, } } } @@ -166,8 +166,8 @@ impl WasmAccountStorage { /// Loads an account by `AccountId`, applies the given `f` function to it, /// and uploads changes to the storage. 
async fn update_account(&self, account_id: AccountId, f: F) -> AccountStorageResult<()> - where - F: FnOnce(&mut AccountTable), + where + F: FnOnce(&mut AccountTable), { let locked_db = self.lock_db_mutex().await?; let transaction = locked_db.inner.transaction().await?; @@ -331,7 +331,7 @@ impl AccountStorage for WasmAccountStorage { account.activated_coins.remove(ticker); } }) - .await + .await } } diff --git a/mm2src/mm2_gui_storage/src/context.rs b/mm2src/mm2_gui_storage/src/context.rs index 00855c269e..926d94cbdf 100644 --- a/mm2src/mm2_gui_storage/src/context.rs +++ b/mm2src/mm2_gui_storage/src/context.rs @@ -12,7 +12,9 @@ impl AccountContext { pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { let account_context = from_ctx(&ctx.account_ctx, move || { Ok(AccountContext { - storage: AccountStorageBuilder::new(ctx, db_id).build().map_err(|e| e.to_string())?, + storage: AccountStorageBuilder::new(ctx, db_id) + .build() + .map_err(|e| e.to_string())?, db_id: db_id.map(|e| e.to_string()), }) })?; @@ -20,7 +22,9 @@ impl AccountContext { if account_context.db_id.as_deref() != db_id { let mut ctx_field = ctx.account_ctx.lock().unwrap(); let account_context = Arc::new(AccountContext { - storage: AccountStorageBuilder::new(ctx, db_id).build().map_err(|e| e.to_string())?, + storage: AccountStorageBuilder::new(ctx, db_id) + .build() + .map_err(|e| e.to_string())?, db_id: db_id.map(|e| e.to_string()), }); *ctx_field = Some(Arc::clone(&account_context) as Arc); diff --git a/mm2src/mm2_gui_storage/src/rpc_commands.rs b/mm2src/mm2_gui_storage/src/rpc_commands.rs index f7632c4dc5..b5b6c93fff 100644 --- a/mm2src/mm2_gui_storage/src/rpc_commands.rs +++ b/mm2src/mm2_gui_storage/src/rpc_commands.rs @@ -42,10 +42,10 @@ impl From for AccountRpcError { AccountStorageError::AccountExistsAlready(account_id) => AccountRpcError::AccountExistsAlready(account_id), AccountStorageError::ErrorDeserializing(e) | AccountStorageError::ErrorLoading(e) => { AccountRpcError::ErrorLoadingAccount(e) - } + }, AccountStorageError::ErrorSaving(e) | AccountStorageError::ErrorSerializing(e) => { AccountRpcError::ErrorSavingAccount(e) - } + }, AccountStorageError::Internal(internal) => AccountRpcError::Internal(internal), } } @@ -78,8 +78,8 @@ pub struct NewAccount { } impl From> for AccountInfo - where - AccountId: From, +where + AccountId: From, { fn from(orig: NewAccount) -> Self { AccountInfo { @@ -184,7 +184,7 @@ pub async fn enable_account(ctx: MmArc, req: EnableAccountRequest) -> MmResult Date: Wed, 1 May 2024 21:42:12 +0100 Subject: [PATCH 079/186] sae dev state --- mm2src/mm2_main/src/lp_swap.rs | 5 +- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 30 ++++++-- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 2 +- .../mm2_main/src/lp_swap/my_swaps_storage.rs | 4 +- .../src/lp_swap/recreate_swap_data.rs | 2 + mm2src/mm2_main/src/lp_swap/saved_swap.rs | 18 +++-- mm2src/mm2_main/src/lp_swap/swap_lock.rs | 9 +-- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 10 +-- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 6 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 23 ++++-- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 2 +- mm2src/mm2_test_helpers/src/for_tests.rs | 73 +++++++++---------- 12 files changed, 108 insertions(+), 76 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index e9bea3b958..b212ab01ac 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -566,9 +566,10 @@ impl SwapsContext { /// Removes storage for the swap with 
specific uuid. pub fn remove_msg_v2_store(&self, uuid: &Uuid) { self.swap_v2_msgs.lock().unwrap().remove(uuid); } - // TODO #[cfg(target_arch = "wasm32")] - pub async fn swap_db(&self) -> InitDbResult> { self.swap_db.get_or_initialize(None).await } + pub async fn swap_db(&self, db_id: Option<&str>) -> InitDbResult> { + self.swap_db.get_or_initialize(db_id).await + } } #[derive(Debug, Deserialize)] diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index c43454cd8d..01a778733f 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -176,6 +176,8 @@ pub struct MakerSwapData { pub taker_coin_htlc_pubkey: Option, /// Temporary privkey used to sign P2P messages when applicable pub p2p_privkey: Option, + // dynamic database id for maker from it's coin rmd160 + pub db_id: Option, } pub struct MakerSwapMut { @@ -279,6 +281,9 @@ impl MakerSwap { } } + #[inline] + fn db_id(&self) -> Option { self.maker_coin.account_db_id() } + fn apply_event(&self, event: MakerSwapEvent) { match event { MakerSwapEvent::Started(data) => { @@ -572,6 +577,7 @@ impl MakerSwap { maker_coin_htlc_pubkey: Some(maker_coin_htlc_pubkey.as_slice().into()), taker_coin_htlc_pubkey: Some(taker_coin_htlc_pubkey.as_slice().into()), p2p_privkey: self.p2p_privkey.map(SerializableSecp256k1Keypair::from), + db_id: self.db_id(), }; // This will be done during order match @@ -1305,8 +1311,8 @@ impl MakerSwap { taker_coin: MmCoinEnum, swap_uuid: &Uuid, ) -> Result<(Self, Option), String> { - let account_key = maker_coin.account_db_id(); - let saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { + let saved = match SavedSwap::load_my_swap_from_db(&ctx, maker_coin.account_db_id().as_deref(), *swap_uuid).await + { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), Err(e) => return ERR!("{}", e), @@ -1846,6 +1852,7 @@ impl MakerSavedSwap { maker_coin_htlc_pubkey: None, taker_coin_htlc_pubkey: None, p2p_privkey: None, + db_id: None, }), }, MakerSavedEvent { @@ -1976,7 +1983,7 @@ impl MakerSavedSwap { } } - // TODO: Adjust for private coins when/if they are braodcasted + // TODO: Adjust for private coins when/if they are broadcasted // TODO: Adjust for HD wallet when completed pub fn swap_pubkeys(&self) -> Result { let maker = match &self.events.first() { @@ -2002,6 +2009,15 @@ impl MakerSavedSwap { Ok(SwapPubkeys { maker, taker }) } + + pub fn db_id(&self) -> Option { + if let Some(events) = self.events.first() { + if let MakerSwapEvent::Started(data) = &events.event { + return data.db_id.clone(); + } + } + None + } } #[allow(clippy::large_enum_variant)] @@ -2100,9 +2116,8 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { }; } let running_swap = Arc::new(swap); - let account_id = running_swap.maker_coin.account_db_id(); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx, account_id.as_deref()).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.db_id().as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.taker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); @@ -2151,13 +2166,14 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { }, None => { if let Err(e) = - mark_swap_as_finished(ctx.clone(), running_swap.uuid, account_id.as_deref()).await + mark_swap_as_finished(ctx.clone(), running_swap.uuid, 
running_swap.db_id().as_deref()).await { error!("!mark_swap_finished({}): {}", uuid, e); } if to_broadcast { - if let Err(e) = broadcast_my_swap_status(&ctx, uuid, account_id.as_deref()).await { + if let Err(e) = broadcast_my_swap_status(&ctx, uuid, running_swap.db_id().as_deref()).await + { error!("!broadcast_my_swap_status({}): {}", uuid, e); } } diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index baf89c4404..3950d6eb87 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -187,7 +187,7 @@ impl StateMachineStorage for MakerSwapStorage { async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.db_id.as_deref()).expect("SwapsContext::from_ctx should not fail"); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let filters_table = transaction.table::().await?; diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index dfe74434c8..366a52014b 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -181,7 +181,7 @@ mod wasm_impl { db_id: Option<&str>, ) -> MySwapsResult<()> { let swap_ctx = SwapsContext::from_ctx(&self.ctx, db_id).map_to_mm(MySwapsError::InternalError)?; - let db = swap_ctx.swap_db().await?; + let db = swap_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; @@ -204,7 +204,7 @@ mod wasm_impl { db_id: &str, ) -> MySwapsResult { let swap_ctx = SwapsContext::from_ctx(&self.ctx, Some(db_id)).map_to_mm(MySwapsError::InternalError)?; - let db = swap_ctx.swap_db().await?; + let db = swap_ctx.swap_db(Some(db_id)).await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; diff --git a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs index 56e7877b70..58801ec0b6 100644 --- a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs +++ b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs @@ -149,6 +149,7 @@ fn recreate_maker_swap(ctx: MmArc, taker_swap: TakerSavedSwap) -> RecreateSwapRe maker_coin_htlc_pubkey: negotiated_event.maker_coin_htlc_pubkey, taker_coin_htlc_pubkey: negotiated_event.taker_coin_htlc_pubkey, p2p_privkey: None, + db_id: maker_swap.db_id(), }); maker_swap.events.push(MakerSavedEvent { timestamp: started_event_timestamp, @@ -347,6 +348,7 @@ async fn recreate_taker_swap(ctx: MmArc, maker_swap: MakerSavedSwap) -> Recreate maker_coin_htlc_pubkey: negotiated_event.maker_coin_htlc_pubkey, taker_coin_htlc_pubkey: negotiated_event.taker_coin_htlc_pubkey, p2p_privkey: None, + db_id: taker_swap.db_id(), }); taker_swap.events.push(TakerSavedEvent { timestamp: started_event_timestamp, diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 3163fd3ade..03f35d19c7 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -334,7 +334,7 @@ mod wasm_impl { pub async fn migrate_swaps_data(ctx: &MmArc, db_id: Option<&str>) -> MmResult<(), SavedSwapError> { info!("migrate_swaps_data: {db_id:?}"); let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; - let db = 
swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let migration_table = transaction.table::().await?; @@ -424,8 +424,9 @@ mod wasm_impl { db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult> { + info!("load_my_swap_from_db: {db_id:?}"); let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -438,8 +439,9 @@ mod wasm_impl { } async fn load_all_my_swaps_from_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { + info!("load_all_my_swaps_from_db: {db_id:?}"); let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -461,7 +463,7 @@ mod wasm_impl { }; let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -486,7 +488,7 @@ mod tests { async fn get_all_items(ctx: &MmArc) -> Vec<(ItemId, SavedSwapTable)> { let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); - let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); + let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction .table::() @@ -551,7 +553,7 @@ mod tests { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); let swaps_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); - let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); + let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction .table::() @@ -577,7 +579,7 @@ mod tests { let swaps_ctx = SwapsContext::from_ctx(&ctx, account_id).unwrap(); { - let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); + let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction .table::() @@ -597,7 +599,7 @@ mod tests { wasm_impl::migrate_swaps_data(&ctx, account_id).await.unwrap(); - let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); + let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction .table::() diff --git a/mm2src/mm2_main/src/lp_swap/swap_lock.rs b/mm2src/mm2_main/src/lp_swap/swap_lock.rs index 9f6e4673e1..ea82dc6798 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_lock.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_lock.rs @@ -129,7 +129,7 @@ mod wasm_lock { async fn lock(ctx: &MmArc, uuid: Uuid, ttl_sec: f64) -> SwapLockResult> { // TODO: db_id let swaps_ctx = SwapsContext::from_ctx(ctx, None).map_to_mm(SwapLockError::InternalError)?; - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(None).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -160,7 +160,7 @@ mod wasm_lock { async fn touch(&self) -> SwapLockResult<()> { // 
TODO: db_id let swaps_ctx = SwapsContext::from_ctx(&self.ctx, None).map_to_mm(SwapLockError::InternalError)?; - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(None).await?; let item = SwapLockTable { uuid: self.swap_uuid, @@ -184,7 +184,7 @@ mod wasm_lock { async fn release(ctx: MmArc, record_id: ItemId) -> SwapLockResult<()> { // TODO: db_id let swaps_ctx = SwapsContext::from_ctx(&ctx, None).map_to_mm(SwapLockError::InternalError)?; - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(None).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.delete_item(record_id).await?; @@ -209,9 +209,8 @@ mod tests { wasm_bindgen_test_configure!(run_in_browser); async fn get_all_items(ctx: &MmArc) -> Vec<(ItemId, SwapLockTable)> { - // TODO: db_id let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); - let db = swaps_ctx.swap_db().await.expect("Error getting SwapDb"); + let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction.table::().await.expect("Error opening table"); table.get_all_items().await.expect("Error getting items") diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 446a234e16..d21d50fcdc 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -121,7 +121,7 @@ pub(super) async fn has_db_record_for( db_id: Option<&str>, ) -> MmResult { let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let maybe_item = table.get_item_by_unique_index("uuid", id).await?; @@ -165,7 +165,7 @@ pub(super) async fn store_swap_event, ) -> MmResult<(), SwapStateMachineError> { let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -192,7 +192,7 @@ pub(super) async fn get_swap_repr( db_id: Option<&str>, ) -> MmResult { let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).expect("SwapsContext::from_ctx should not fail"); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -231,7 +231,7 @@ pub(super) async fn get_unfinished_swaps_uuids( .with_value(BoolAsInt::new(false))? 
.with_value(swap_type)?; let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let table_items = table.get_items_by_multi_index(index).await?; @@ -261,7 +261,7 @@ pub(super) async fn mark_swap_as_finished( db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> { let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let mut item = match table.get_item_by_unique_index("uuid", id).await? { diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index aa626c13df..f78bf03578 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -85,7 +85,7 @@ pub(super) async fn get_swap_type( use crate::mm2::lp_swap::swap_wasm_db::MySwapsFiltersTable; let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let item = match table.get_item_by_unique_index("uuid", uuid).await? { @@ -199,7 +199,7 @@ pub(super) async fn get_maker_swap_data_for_rpc( db_id: Option<&str>, ) -> MmResult>, SwapV2DbError> { let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let item = match table.get_item_by_unique_index("uuid", uuid).await? { @@ -240,7 +240,7 @@ pub(super) async fn get_taker_swap_data_for_rpc( db_id: Option<&str>, ) -> MmResult>, SwapV2DbError> { let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let item = match table.get_item_by_unique_index("uuid", uuid).await? 
{ diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index b4af346e50..7129dec9cb 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -352,6 +352,15 @@ impl TakerSavedSwap { Ok(SwapPubkeys { maker, taker }) } + + pub fn db_id(&self) -> Option { + if let Some(events) = self.events.first() { + if let TakerSwapEvent::Started(data) = &events.event { + return data.db_id.clone(); + } + } + None + } } #[allow(clippy::large_enum_variant)] @@ -445,10 +454,8 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let uuid = swap.uuid.to_string(); let to_broadcast = !(swap.maker_coin.is_privacy() || swap.taker_coin.is_privacy()); let running_swap = Arc::new(swap); - let account_id = running_swap.taker_coin.account_db_id(); - info!("USING COIN PUBKEY: {account_id:?}"); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx, account_id.as_deref()).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.db_id().as_deref()).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.maker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); @@ -490,14 +497,14 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { }, None => { if let Err(e) = - mark_swap_as_finished(ctx.clone(), running_swap.uuid, account_id.as_deref()).await + mark_swap_as_finished(ctx.clone(), running_swap.uuid, running_swap.db_id().as_deref()).await { error!("!mark_swap_finished({}): {}", uuid, e); } if to_broadcast { if let Err(e) = - broadcast_my_swap_status(&ctx, running_swap.uuid, account_id.as_deref()).await + broadcast_my_swap_status(&ctx, running_swap.uuid, running_swap.db_id().as_deref()).await { error!("!broadcast_my_swap_status({}): {}", uuid, e); } @@ -554,6 +561,8 @@ pub struct TakerSwapData { pub taker_coin_htlc_pubkey: Option, /// Temporary privkey used to sign P2P messages when applicable pub p2p_privkey: Option, + // dynamic database id for taker from it's coin rmd160 + pub db_id: Option, } pub struct TakerSwapMut { @@ -793,6 +802,9 @@ impl TakerSwap { #[inline] fn wait_refund_until(&self) -> u64 { self.r().data.taker_payment_lock + 3700 } + #[inline] + fn db_id(&self) -> Option { self.taker_coin.account_db_id() } + fn apply_event(&self, event: TakerSwapEvent) { match event { TakerSwapEvent::Started(data) => { @@ -1112,6 +1124,7 @@ impl TakerSwap { maker_coin_htlc_pubkey: Some(maker_coin_htlc_pubkey.as_slice().into()), taker_coin_htlc_pubkey: Some(taker_coin_htlc_pubkey.as_slice().into()), p2p_privkey: self.p2p_privkey.map(SerializableSecp256k1Keypair::from), + db_id: self.taker_coin.account_db_id(), }; // This will be done during order match diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 4176c8992b..1f12bcac03 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -219,7 +219,7 @@ impl StateMachineStorage for TakerSwapStorage { async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.db_id.as_deref()).expect("SwapsContext::from_ctx should not fail"); - let db = swaps_ctx.swap_db().await?; + let db = swaps_ctx.swap_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; let filters_table = transaction.table::().await?; diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs 
b/mm2src/mm2_test_helpers/src/for_tests.rs index 5bc8338e51..b5085d69b3 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -416,10 +416,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let bob_passphrase = crate::get_passphrase!(".env.seed", "BOB_PASSPHRASE").unwrap(); Mm2TestConf::seednode(&bob_passphrase, coins) - } + }, Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::seednode_with_hd_account(Self::BOB_HD_PASSPHRASE, coins) - } + }, } } @@ -428,10 +428,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let alice_passphrase = crate::get_passphrase!(".env.client", "ALICE_PASSPHRASE").unwrap(); Mm2TestConf::light_node(&alice_passphrase, coins, &[bob_ip]) - } + }, Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::light_node_with_hd_account(Self::ALICE_HD_PASSPHRASE, coins, &[bob_ip]) - } + }, } } } @@ -1114,7 +1114,7 @@ pub async fn mm_ctx_with_custom_async_db() -> MmArc { let connection = AsyncConnection::open_in_memory().await.unwrap(); let connection = AsyncConnectionCtx { - connection: connection, + connection, db_id: ctx.rmd160_hex(), }; let _ = ctx.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(connection))); @@ -1139,7 +1139,7 @@ impl RaiiKill { _ => { self.running = false; false - } + }, } } } @@ -1284,7 +1284,7 @@ impl MarketMakerIt { let dir = folder.join("DB"); conf["dbdir"] = dir.to_str().unwrap().into(); dir - } + }, }; try_s!(fs::create_dir(&folder)); @@ -1299,7 +1299,7 @@ impl MarketMakerIt { let path = folder.join("mm2.log"); conf["log"] = path.to_str().unwrap().into(); path - } + }, }; // If `local` is provided @@ -1391,8 +1391,8 @@ impl MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1418,8 +1418,8 @@ impl MarketMakerIt { /// after process is stopped #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log_after_stop(&self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { use common::try_or_ready_err; @@ -1432,19 +1432,19 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_ms(ms) - .with_timeout_secs(timeout_sec) - .await - .map_err(|e| ERRL!("{:?}", e)) - // Convert `Result, String>` to `Result<(), String>` - .flatten() + .repeat_every_ms(ms) + .with_timeout_secs(timeout_sec) + .await + .map_err(|e| ERRL!("{:?}", e)) + // Convert `Result, String>` to `Result<(), String>` + .flatten() } /// Busy-wait on the instance in-memory log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(target_arch = "wasm32")] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { wait_for_log(&self.ctx, timeout_sec, pred).await } @@ -1468,7 +1468,7 @@ impl MarketMakerIt { let body_str = json::to_string(&body).unwrap_or_else(|_| panic!("Response {:?} is not a valid JSON", body)); Ok((status_code, body_str, HeaderMap::new())) - } + }, Err(e) => Ok((StatusCode::INTERNAL_SERVER_ERROR, e, HeaderMap::new())), } } @@ -1534,7 +1534,7 @@ impl MarketMakerIt { } else { return ERR!("{}", err); } - } + }, }; if status != StatusCode::OK { return ERR!("MM didn't accept a stop. 
body: {}", body); @@ -1557,10 +1557,10 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_secs(0.05) - .with_timeout_ms(timeout_ms) - .await - .map_err(|e| ERRL!("{:?}", e)) + .repeat_every_secs(0.05) + .with_timeout_ms(timeout_ms) + .await + .map_err(|e| ERRL!("{:?}", e)) } /// Currently, we cannot wait for the `Completed IAmrelay handling for peer` log entry on WASM node, @@ -1665,8 +1665,8 @@ impl Drop for MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. pub async fn wait_for_log(ctx: &MmArc, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, +where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1766,7 +1766,7 @@ pub fn mm_spat() -> (&'static str, MarketMakerIt, RaiiDump, RaiiDump) { "pass".into(), None, ) - .unwrap(); + .unwrap(); let (dump_log, dump_dashboard) = mm_dump(&mm.log_path); (passphrase, mm, dump_log, dump_dashboard) } @@ -1843,10 +1843,10 @@ pub fn from_env_file(env: Vec) -> (Option, Option) { match cap.get(1) { Some(name) if name.as_bytes() == b"PASSPHRASE" => { passphrase = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - } + }, Some(name) if name.as_bytes() == b"USERPASS" => { userpass = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - } + }, _ => (), } } @@ -1938,7 +1938,6 @@ pub async fn enable_eth_coin( json::from_str(&enable.1).unwrap() } - pub async fn enable_spl(mm: &MarketMakerIt, coin: &str) -> Json { let req = json!({ "userpass": mm.userpass, @@ -2200,7 +2199,7 @@ pub async fn init_lightning_status(mm: &MarketMakerIt, task_id: u64) -> Json { pub fn new_mm2_temp_folder_path(ip: Option) -> PathBuf { let now = common::now_ms(); #[allow(deprecated)] - let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); + let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); let folder = match ip { Some(ip) => format!("mm2_{}_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f"), ip), None => format!("mm2_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f")), @@ -3304,7 +3303,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; log!("Checking maker status.."); check_my_swap_status( @@ -3313,7 +3312,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; } } @@ -3338,8 +3337,8 @@ pub async fn test_qrc20_history_impl(local_start: Option) { "pass".into(), local_start, ) - .await - .unwrap(); + .await + .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); #[cfg(not(target_arch = "wasm32"))] From e2877a8a86f023b5bb8cd32afb4dbb94fc46757e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 1 May 2024 21:58:05 +0100 Subject: [PATCH 080/186] save dev state --- mm2src/mm2_main/src/lp_swap.rs | 10 ++++------ mm2src/mm2_main/src/lp_swap/maker_swap.rs | 6 ------ mm2src/mm2_main/src/lp_swap/saved_swap.rs | 19 ++++--------------- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 6 ------ 4 files changed, 8 insertions(+), 33 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index b212ab01ac..a278f62628 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -337,7 +337,7 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest return match 
json::from_slice::(msg) { Ok(mut status) => { status.data.fetch_and_set_usd_prices().await; - let account_id = status.data.account_db_id(&ctx).await; + let account_id = status.data.account_db_id().await; if let Err(e) = save_stats_swap(&ctx, &status.data, account_id.as_deref()).await { error!("Error saving the swap {} status: {}", status.data.uuid(), e); } @@ -361,8 +361,7 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest }; debug!("Processing swap msg {:?} for uuid {}", msg, uuid); - let db_dir = None; - let swap_ctx = SwapsContext::from_ctx(&ctx, db_dir).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let mut msgs = swap_ctx.swap_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(&uuid) { if msg_store.accept_only_from.bytes == msg.2.unprefixed() { @@ -399,14 +398,13 @@ async fn recv_swap_msg( mut getter: impl FnMut(&mut SwapMsgStore) -> Option, uuid: &Uuid, timeout: u64, - db_id: Option<&str>, ) -> Result { let started = now_sec(); let timeout = BASIC_COMM_TIMEOUT + timeout; let wait_until = started + timeout; loop { Timer::sleep(1.).await; - let swap_ctx = SwapsContext::from_ctx(&ctx, db_id).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); let mut msgs = swap_ctx.swap_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(uuid) { if let Some(msg) = getter(msg_store) { @@ -1598,7 +1596,7 @@ pub async fn import_swaps(ctx: MmArc, req: Json) -> Result>, St let mut imported = vec![]; let mut skipped = HashMap::new(); for swap in swaps { - let accound_id = swap.account_db_id(&ctx).await; + let accound_id = swap.account_db_id().await; match swap.save_to_db(&ctx, accound_id.as_deref()).await { Ok(_) => { if let Some(info) = swap.get_my_info() { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 01a778733f..83654381f8 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -600,13 +600,11 @@ impl MakerSwap { NEGOTIATION_TIMEOUT_SEC as f64 / 6., self.p2p_privkey, ); - let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiation_reply.take(), &self.uuid, NEGOTIATION_TIMEOUT_SEC, - db_id.as_deref(), ); let taker_data = match recv_fut.await { Ok(d) => d, @@ -706,13 +704,11 @@ impl MakerSwap { self.p2p_privkey, ); - let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.taker_fee.take(), &self.uuid, TAKER_FEE_RECV_TIMEOUT_SEC, - db_id.as_deref(), ); let payload = match recv_fut.await { Ok(d) => d, @@ -943,13 +939,11 @@ impl MakerSwap { // wait for 3/5, we need to leave some time space for transaction to be confirmed let wait_duration = (self.r().data.lock_duration * 3) / 5; - let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.taker_payment.take(), &self.uuid, wait_duration, - db_id.as_deref(), ); // Todo: taker_payment should be a message on lightning network not a swap message let payload = match recv_fut.await { diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 03f35d19c7..5702a66890 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -154,22 +154,11 @@ impl SavedSwap { } } - pub async fn account_db_id(&self, ctx: &MmArc) -> Option { - let coin_ticker = match self { - SavedSwap::Maker(swap) => &swap.maker_coin, - 
SavedSwap::Taker(swap) => &swap.taker_coin, - }; - - if let Some(ticker) = coin_ticker { - if let Ok(coin) = coins::lp_coinfind_any(ctx, ticker).await { - let coin = coin.map(|c| c.inner); - if let Some(coin) = coin { - return coin.account_db_id(); - } - }; + pub async fn account_db_id(&self) -> Option { + match self { + SavedSwap::Maker(swap) => swap.db_id(), + SavedSwap::Taker(swap) => swap.db_id(), } - - None } } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 7129dec9cb..b3e0fb3d05 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -1136,13 +1136,11 @@ impl TakerSwap { async fn negotiate(&self) -> Result<(Option, Vec), String> { const NEGOTIATE_TIMEOUT_SEC: u64 = 90; - let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiation.take(), &self.uuid, NEGOTIATE_TIMEOUT_SEC, - db_id.as_deref(), ); let maker_data = match recv_fut.await { Ok(d) => d, @@ -1249,13 +1247,11 @@ impl TakerSwap { NEGOTIATE_TIMEOUT_SEC as f64 / 6., self.p2p_privkey, ); - let db_id = self.taker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.negotiated.take(), &self.uuid, NEGOTIATE_TIMEOUT_SEC, - db_id.as_deref(), ); let negotiated = match recv_fut.await { Ok(d) => d, @@ -1346,13 +1342,11 @@ impl TakerSwap { self.p2p_privkey, ); - let db_id = self.maker_coin.account_db_id(); let recv_fut = recv_swap_msg( self.ctx.clone(), |store| store.maker_payment.take(), &self.uuid, MAKER_PAYMENT_WAIT_TIMEOUT_SEC, - db_id.as_deref(), ); let payload = match recv_fut.await { Ok(p) => p, From 577e18bacbae2f87589057aa1dc9659c50d09909 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 1 May 2024 22:14:11 +0100 Subject: [PATCH 081/186] remove SwapsContext db_id param --- mm2src/mm2_main/src/lp_swap.rs | 28 ++++++++----------- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 2 +- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 12 +++----- .../mm2_main/src/lp_swap/my_swaps_storage.rs | 4 +-- mm2src/mm2_main/src/lp_swap/pubkey_banning.rs | 10 +++---- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 18 ++++++------ mm2src/mm2_main/src/lp_swap/swap_lock.rs | 8 +++--- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 14 +++++----- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 6 ++-- mm2src/mm2_main/src/lp_swap/swap_watcher.rs | 3 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 4 +-- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 12 +++----- 12 files changed, 54 insertions(+), 67 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index a278f62628..ab9e3990e9 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -361,7 +361,7 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest }; debug!("Processing swap msg {:?} for uuid {}", msg, uuid); - let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let mut msgs = swap_ctx.swap_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(&uuid) { if msg_store.accept_only_from.bytes == msg.2.unprefixed() { @@ -404,7 +404,7 @@ async fn recv_swap_msg( let wait_until = started + timeout; loop { Timer::sleep(1.).await; - let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let mut msgs = swap_ctx.swap_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(uuid) { if let Some(msg) = 
getter(msg_store) { @@ -532,7 +532,7 @@ struct SwapsContext { impl SwapsContext { /// Obtains a reference to this crate context, creating it if necessary. #[allow(unused_variables)] - fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { + fn from_ctx(ctx: &MmArc) -> Result, String> { Ok(try_s!(from_ctx(&ctx.swaps_ctx, move || { Ok(SwapsContext { running_swaps: Mutex::new(vec![]), @@ -544,8 +544,9 @@ impl SwapsContext { TAKER_SWAP_ENTRY_TIMEOUT_SEC, ))), locked_amounts: Mutex::new(HashMap::new()), + // Using None for db_id here won't matter much since calling `SwapsContext::swap_db(db_id)` will using the provided db_id. #[cfg(target_arch = "wasm32")] - swap_db: ConstructibleDb::new(ctx, db_id), + swap_db: ConstructibleDb::new(ctx, None), }) }))) } @@ -619,8 +620,7 @@ pub async fn get_locked_amount_rpc( /// Get total amount of selected coin locked by all currently ongoing swaps pub fn get_locked_amount(ctx: &MmArc, coin: &str) -> MmNumber { - // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(ctx).unwrap(); let swap_lock = swap_ctx.running_swaps.lock().unwrap(); let mut locked = swap_lock @@ -660,8 +660,7 @@ pub fn get_locked_amount(ctx: &MmArc, coin: &str) -> MmNumber { /// Get number of currently running swaps pub fn running_swaps_num(ctx: &MmArc) -> u64 { - // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(ctx).unwrap(); let swaps = swap_ctx.running_swaps.lock().unwrap(); swaps.iter().fold(0, |total, swap| match swap.upgrade() { Some(_) => total + 1, @@ -671,8 +670,7 @@ pub fn running_swaps_num(ctx: &MmArc) -> u64 { /// Get total amount of selected coin locked by all currently ongoing swaps except the one with selected uuid fn get_locked_amount_by_other_swaps(ctx: &MmArc, except_uuid: &Uuid, coin: &str) -> MmNumber { - // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(ctx).unwrap(); let swap_lock = swap_ctx.running_swaps.lock().unwrap(); swap_lock @@ -694,8 +692,7 @@ fn get_locked_amount_by_other_swaps(ctx: &MmArc, except_uuid: &Uuid, coin: &str) } pub fn active_swaps_using_coins(ctx: &MmArc, coins: &HashSet) -> Result, String> { - // TODO: db_id - let swap_ctx = try_s!(SwapsContext::from_ctx(ctx, None)); + let swap_ctx = try_s!(SwapsContext::from_ctx(ctx)); let swaps = try_s!(swap_ctx.running_swaps.lock()); let mut uuids = vec![]; for swap in swaps.iter() { @@ -717,8 +714,7 @@ pub fn active_swaps_using_coins(ctx: &MmArc, coins: &HashSet) -> Result< } pub fn active_swaps(ctx: &MmArc) -> Result, String> { - // TODO: db_id - let swap_ctx = try_s!(SwapsContext::from_ctx(ctx, None)); + let swap_ctx = try_s!(SwapsContext::from_ctx(ctx)); let swaps = swap_ctx.running_swaps.lock().unwrap(); let mut uuids = vec![]; for swap in swaps.iter() { @@ -1803,7 +1799,7 @@ pub fn process_swap_v2_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PProcessRes let uuid = Uuid::from_str(topic).map_to_mm(|e| P2PProcessError::DecodeError(e.to_string()))?; - let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let mut msgs = swap_ctx.swap_v2_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(&uuid) { let signed_message = SignedMessage::decode(msg).map_to_mm(|e| P2PProcessError::DecodeError(e.to_string()))?; @@ -1871,7 +1867,7 @@ async fn recv_swap_v2_msg( loop { Timer::sleep(1.).await; // TODO: db_id - let swap_ctx = 
SwapsContext::from_ctx(&ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let mut msgs = swap_ctx.swap_v2_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(uuid) { if let Some(msg) = getter(msg_store) { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 83654381f8..b8247254ec 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -2111,7 +2111,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { } let running_swap = Arc::new(swap); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.db_id().as_deref()).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.taker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index 3950d6eb87..a8daa4d4f3 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -185,8 +185,7 @@ impl StateMachineStorage for MakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { - let swaps_ctx = - SwapsContext::from_ctx(&self.ctx, self.db_id.as_deref()).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; @@ -692,8 +691,7 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let maker_coin_ticker: String = self.maker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -712,8 +710,7 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let ticker = self.maker_coin.ticker(); if let Some(maker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { maker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); @@ -747,8 +744,7 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.maker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let maker_coin_ticker: String = self.maker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 366a52014b..4d77892aea 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -180,7 +180,7 @@ mod wasm_impl { swap_type: u8, db_id: Option<&str>, ) -> MySwapsResult<()> { - let swap_ctx = SwapsContext::from_ctx(&self.ctx, db_id).map_to_mm(MySwapsError::InternalError)?; + let swap_ctx = SwapsContext::from_ctx(&self.ctx).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db(db_id).await?; let 
transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; @@ -203,7 +203,7 @@ mod wasm_impl { paging_options: Option<&PagingOptions>, db_id: &str, ) -> MySwapsResult { - let swap_ctx = SwapsContext::from_ctx(&self.ctx, Some(db_id)).map_to_mm(MySwapsError::InternalError)?; + let swap_ctx = SwapsContext::from_ctx(&self.ctx).map_to_mm(MySwapsError::InternalError)?; let db = swap_ctx.swap_db(Some(db_id)).await?; let transaction = db.transaction().await?; let my_swaps_table = transaction.table::().await?; diff --git a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs index 11065cb704..4a628a59d9 100644 --- a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs +++ b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs @@ -22,7 +22,7 @@ pub enum BanReason { pub fn ban_pubkey_on_failed_swap(ctx: &MmArc, pubkey: H256, swap_uuid: &Uuid, event: SwapEvent) { // TODO: db_id - let ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let ctx = SwapsContext::from_ctx(ctx).unwrap(); let mut banned = ctx.banned_pubkeys.lock().unwrap(); banned.insert(pubkey.into(), BanReason::FailedSwap { caused_by_swap: *swap_uuid, @@ -32,14 +32,14 @@ pub fn ban_pubkey_on_failed_swap(ctx: &MmArc, pubkey: H256, swap_uuid: &Uuid, ev pub fn is_pubkey_banned(ctx: &MmArc, pubkey: &H256Json) -> bool { // TODO: db_id - let ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let ctx = SwapsContext::from_ctx(ctx).unwrap(); let banned = ctx.banned_pubkeys.lock().unwrap(); banned.contains_key(pubkey) } pub async fn list_banned_pubkeys_rpc(ctx: MmArc) -> Result>, String> { // TODO: db_id - let ctx = try_s!(SwapsContext::from_ctx(&ctx, None)); + let ctx = try_s!(SwapsContext::from_ctx(&ctx)); let res = try_s!(json::to_vec(&json!({ "result": *try_s!(ctx.banned_pubkeys.lock()), }))); @@ -55,7 +55,7 @@ struct BanPubkeysReq { pub async fn ban_pubkey_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: BanPubkeysReq = try_s!(json::from_value(req)); // TODO: db_id - let ctx = try_s!(SwapsContext::from_ctx(&ctx, None)); + let ctx = try_s!(SwapsContext::from_ctx(&ctx)); let mut banned_pubs = try_s!(ctx.banned_pubkeys.lock()); match banned_pubs.entry(req.pubkey) { @@ -80,7 +80,7 @@ enum UnbanPubkeysReq { pub async fn unban_pubkeys_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: UnbanPubkeysReq = try_s!(json::from_value(req["unban_by"].clone())); // TODO: db_id - let ctx = try_s!(SwapsContext::from_ctx(&ctx, None)); + let ctx = try_s!(SwapsContext::from_ctx(&ctx)); let mut banned_pubs = try_s!(ctx.banned_pubkeys.lock()); let mut unbanned = HashMap::new(); let mut were_not_banned = vec![]; diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 5702a66890..cefd412b38 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -322,7 +322,7 @@ mod wasm_impl { pub async fn migrate_swaps_data(ctx: &MmArc, db_id: Option<&str>) -> MmResult<(), SavedSwapError> { info!("migrate_swaps_data: {db_id:?}"); - let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; + let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let migration_table = transaction.table::().await?; @@ -414,7 +414,7 @@ mod wasm_impl { uuid: Uuid, ) -> SavedSwapResult> { info!("load_my_swap_from_db: {db_id:?}"); - let swaps_ctx = SwapsContext::from_ctx(ctx, 
db_id).map_to_mm(SavedSwapError::InternalError)?; + let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -429,7 +429,7 @@ mod wasm_impl { async fn load_all_my_swaps_from_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { info!("load_all_my_swaps_from_db: {db_id:?}"); - let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; + let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -451,7 +451,7 @@ mod wasm_impl { saved_swap, }; - let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).map_to_mm(SavedSwapError::InternalError)?; + let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -476,7 +476,7 @@ mod tests { wasm_bindgen_test_configure!(run_in_browser); async fn get_all_items(ctx: &MmArc) -> Vec<(ItemId, SavedSwapTable)> { - let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction @@ -541,7 +541,7 @@ mod tests { async fn test_get_current_migration() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let swaps_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction @@ -566,9 +566,9 @@ mod tests { let account_id = None; saved_swap.save_to_db(&ctx, account_id).await.expect("!save_to_db"); - let swaps_ctx = SwapsContext::from_ctx(&ctx, account_id).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); { - let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); + let db = swaps_ctx.swap_db(account_id).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction .table::() @@ -588,7 +588,7 @@ mod tests { wasm_impl::migrate_swaps_data(&ctx, account_id).await.unwrap(); - let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); + let db = swaps_ctx.swap_db(account_id).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction .table::() diff --git a/mm2src/mm2_main/src/lp_swap/swap_lock.rs b/mm2src/mm2_main/src/lp_swap/swap_lock.rs index ea82dc6798..29dbf8f214 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_lock.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_lock.rs @@ -127,8 +127,8 @@ mod wasm_lock { #[async_trait] impl SwapLockOps for SwapLock { async fn lock(ctx: &MmArc, uuid: Uuid, ttl_sec: f64) -> SwapLockResult> { + let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SwapLockError::InternalError)?; // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(ctx, None).map_to_mm(SwapLockError::InternalError)?; let db = swaps_ctx.swap_db(None).await?; let transaction = db.transaction().await?; let table = 
transaction.table::().await?; @@ -158,8 +158,8 @@ mod wasm_lock { } async fn touch(&self) -> SwapLockResult<()> { + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).map_to_mm(SwapLockError::InternalError)?; // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, None).map_to_mm(SwapLockError::InternalError)?; let db = swaps_ctx.swap_db(None).await?; let item = SwapLockTable { @@ -182,8 +182,8 @@ mod wasm_lock { impl SwapLock { async fn release(ctx: MmArc, record_id: ItemId) -> SwapLockResult<()> { + let swaps_ctx = SwapsContext::from_ctx(&ctx).map_to_mm(SwapLockError::InternalError)?; // TODO: db_id - let swaps_ctx = SwapsContext::from_ctx(&ctx, None).map_to_mm(SwapLockError::InternalError)?; let db = swaps_ctx.swap_db(None).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -209,7 +209,7 @@ mod tests { wasm_bindgen_test_configure!(run_in_browser); async fn get_all_items(ctx: &MmArc) -> Vec<(ItemId, SwapLockTable)> { - let swaps_ctx = SwapsContext::from_ctx(ctx, None).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); let db = swaps_ctx.swap_db(None).await.expect("Error getting SwapDb"); let transaction = db.transaction().await.expect("Error creating transaction"); let table = transaction.table::().await.expect("Error opening table"); diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index d21d50fcdc..ad4bf22636 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -120,7 +120,7 @@ pub(super) async fn has_db_record_for( id: &Uuid, db_id: Option<&str>, ) -> MmResult { - let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -164,7 +164,7 @@ pub(super) async fn store_swap_event, ) -> MmResult<(), SwapStateMachineError> { - let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -191,7 +191,7 @@ pub(super) async fn get_swap_repr( id: Uuid, db_id: Option<&str>, ) -> MmResult { - let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; @@ -230,7 +230,7 @@ pub(super) async fn get_unfinished_swaps_uuids( let index = MultiIndex::new(IS_FINISHED_SWAP_TYPE_INDEX) .with_value(BoolAsInt::new(false))? 
.with_value(swap_type)?; - let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -260,7 +260,7 @@ pub(super) async fn mark_swap_as_finished( id: Uuid, db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> { - let swaps_ctx = SwapsContext::from_ctx(&ctx, db_id).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -276,7 +276,7 @@ pub(super) async fn mark_swap_as_finished( pub(super) fn init_additional_context_impl(ctx: &MmArc, swap_info: ActiveSwapV2Info, other_p2p_pubkey: PublicKey) { subscribe_to_topic(ctx, swap_v2_topic(&swap_info.uuid)); // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(ctx, None).expect("SwapsContext::from_ctx should not fail"); + let swap_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); swap_ctx.init_msg_v2_store(swap_info.uuid, other_p2p_pubkey); swap_ctx .active_swaps_v2_infos @@ -288,7 +288,7 @@ pub(super) fn init_additional_context_impl(ctx: &MmArc, swap_info: ActiveSwapV2I pub(super) fn clean_up_context_impl(ctx: &MmArc, uuid: &Uuid, maker_coin: &str, taker_coin: &str) { unsubscribe_from_topic(ctx, swap_v2_topic(uuid)); // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(ctx, None).expect("SwapsContext::from_ctx should not fail"); + let swap_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); swap_ctx.remove_msg_v2_store(uuid); swap_ctx.active_swaps_v2_infos.lock().unwrap().remove(uuid); diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index f78bf03578..91737bccf6 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -84,7 +84,7 @@ pub(super) async fn get_swap_type( ) -> MmResult, SwapV2DbError> { use crate::mm2::lp_swap::swap_wasm_db::MySwapsFiltersTable; - let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -198,7 +198,7 @@ pub(super) async fn get_maker_swap_data_for_rpc( uuid: &Uuid, db_id: Option<&str>, ) -> MmResult>, SwapV2DbError> { - let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -239,7 +239,7 @@ pub(super) async fn get_taker_swap_data_for_rpc( uuid: &Uuid, db_id: Option<&str>, ) -> MmResult>, SwapV2DbError> { - let swaps_ctx = SwapsContext::from_ctx(ctx, db_id).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(ctx).unwrap(); let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; diff --git a/mm2src/mm2_main/src/lp_swap/swap_watcher.rs b/mm2src/mm2_main/src/lp_swap/swap_watcher.rs index c43517b925..a8df8f2455 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_watcher.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_watcher.rs @@ 
-587,8 +587,7 @@ fn spawn_taker_swap_watcher(ctx: MmArc, watcher_data: TakerSwapWatcherData, veri return; } - // TODO: db_id - let swap_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); if swap_ctx.swap_msgs.lock().unwrap().contains_key(&watcher_data.uuid) { return; } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index b3e0fb3d05..0bcd20d689 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -455,7 +455,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let to_broadcast = !(swap.maker_coin.is_privacy() || swap.taker_coin.is_privacy()); let running_swap = Arc::new(swap); let weak_ref = Arc::downgrade(&running_swap); - let swap_ctx = SwapsContext::from_ctx(&ctx, running_swap.db_id().as_deref()).unwrap(); + let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); swap_ctx.init_msg_store(running_swap.uuid, running_swap.maker); swap_ctx.running_swaps.lock().unwrap().push(weak_ref); @@ -3248,7 +3248,7 @@ mod taker_swap_tests { taker_saved_swap, )) .unwrap(); - let swaps_ctx = SwapsContext::from_ctx(&ctx, None).unwrap(); + let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let arc = Arc::new(swap); let weak_ref = Arc::downgrade(&arc); swaps_ctx.running_swaps.lock().unwrap().push(weak_ref); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 1f12bcac03..8860cdf845 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -217,8 +217,7 @@ impl StateMachineStorage for TakerSwapStorage { #[cfg(target_arch = "wasm32")] async fn store_repr(&mut self, uuid: Self::MachineId, repr: Self::DbRepr) -> Result<(), Self::Error> { - let swaps_ctx = - SwapsContext::from_ctx(&self.ctx, self.db_id.as_deref()).expect("SwapsContext::from_ctx should not fail"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("SwapsContext::from_ctx should not fail"); let db = swaps_ctx.swap_db(self.db_id.as_deref()).await?; let transaction = db.transaction().await?; @@ -808,8 +807,7 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, @@ -828,8 +826,7 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let ticker = self.taker_coin.ticker(); if let Some(taker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { taker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); @@ -857,8 +854,7 @@ impl { - let swaps_ctx = SwapsContext::from_ctx(&self.ctx, self.taker_coin.account_db_id().as_deref()) - .expect("from_ctx should not fail at this point"); + let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let taker_coin_ticker: String = self.taker_coin.ticker().into(); let new_locked = LockedAmountInfo { swap_uuid: self.uuid, From 9eaeb2ebd5e0b463e8fb04530f7d892e16fa27be Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 1 May 2024 
22:40:20 +0100 Subject: [PATCH 082/186] fix tests --- mm2src/mm2_test_helpers/src/for_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index b5085d69b3..f92ba24bd4 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -1113,7 +1113,7 @@ pub async fn mm_ctx_with_custom_async_db() -> MmArc { let ctx = MmCtxBuilder::new().into_mm_arc(); let connection = AsyncConnection::open_in_memory().await.unwrap(); - let connection = AsyncConnectionCtx { + let connection = db_common::AsyncConnectionCtx { connection, db_id: ctx.rmd160_hex(), }; From 9882cccedc4837da2b6e386fea3040b5524695a4 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 1 May 2024 22:58:25 +0100 Subject: [PATCH 083/186] update ordermatch --- mm2src/mm2_main/src/lp_native_dex.rs | 2 +- mm2src/mm2_main/src/lp_ordermatch.rs | 23 +++++++++++++---------- mm2src/mm2_main/src/lp_swap.rs | 2 +- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 5afb9671ec..18344e3916 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -454,7 +454,7 @@ fn init_wasm_event_streaming(ctx: &MmArc) { } pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { - init_ordermatch_context(&ctx, None)?; + init_ordermatch_context(&ctx)?; init_p2p(ctx.clone()).await?; if !CryptoCtx::is_init(&ctx)? { diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index fdffe2194f..a3083952e8 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2751,13 +2751,13 @@ struct OrdermatchContext { /// Pending MakerReserved messages for a specific TakerOrder UUID /// Used to select a trade with the best price upon matching pending_maker_reserved: AsyncMutex>>, + // Using None for db_id here won't matter much since calling `OrdermatchContext::ordermatch_db(db_id)` will use the provided db_id. #[cfg(target_arch = "wasm32")] ordermatch_db: ConstructibleDb, - db_id: Option, } #[allow(unused)] -pub fn init_ordermatch_context(ctx: &MmArc, db_id: Option<&str>) -> OrdermatchInitResult<()> { +pub fn init_ordermatch_context(ctx: &MmArc) -> OrdermatchInitResult<()> { // Helper #[derive(Deserialize)] struct CoinConf { @@ -2790,8 +2790,7 @@ pub fn init_ordermatch_context(ctx: &MmArc, db_id: Option<&str>) -> OrdermatchIn orderbook_tickers, original_tickers, #[cfg(target_arch = "wasm32")] - ordermatch_db: ConstructibleDb::new(ctx, db_id), - db_id: db_id.map(|d| d.to_string()), + ordermatch_db: ConstructibleDb::new(ctx, None), }; from_ctx(&ctx.ordermatch_ctx, move || Ok(ordermatch_context)) @@ -2820,9 +2819,9 @@ impl OrdermatchContext { pending_maker_reserved: Default::default(), orderbook_tickers: Default::default(), original_tickers: Default::default(), + // Using None for db_id here won't matter much since calling `OrdermatchContext::ordermatch_db(db_id)` will use the provided db_id. #[cfg(target_arch = "wasm32")] - ordermatch_db: ConstructibleDb::new(ctx, db_id), - db_id: None, + ordermatch_db: ConstructibleDb::new(ctx, None), }) }))) } @@ -3393,7 +3392,8 @@ pub async fn clean_memory_loop(ctx_weak: MmWeak) { /// The function locks the [`OrdermatchContext::my_maker_orders`] and [`OrdermatchContext::my_taker_orders`] mutexes. 
async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let mut my_taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - let storage = MyOrdersStorage::new(ctx.clone(), ordermatch_ctx.db_id.clone()); + // TODO: db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let mut my_actual_taker_orders = HashMap::with_capacity(my_taker_orders.len()); for (uuid, order) in my_taker_orders.drain() { @@ -3483,7 +3483,8 @@ async fn check_balance_for_maker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchC /// The function locks the [`OrdermatchContext::my_maker_orders`] mutex. async fn handle_timed_out_maker_matches(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let now = now_ms(); - let storage = MyOrdersStorage::new(ctx.clone(), ordermatch_ctx.db_id.clone()); + // TODO: db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let my_maker_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); for (_, order) in my_maker_orders.iter() { @@ -3674,7 +3675,8 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: } let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); - let storage = MyOrdersStorage::new(ctx.clone(), ordermatch_ctx.db_id.clone()); + // TODO: db_id + let storage = MyOrdersStorage::new(ctx.clone(), None); let mut my_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); let filtered = my_orders .iter_mut() @@ -3823,7 +3825,8 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg updated_msg.with_new_max_volume(my_order.available_amount().into()); maker_order_updated_p2p_notify(ctx.clone(), topic, updated_msg, my_order.p2p_keypair()); } - MyOrdersStorage::new(ctx, ordermatch_ctx.db_id.clone()) + // TODO: db_id + MyOrdersStorage::new(ctx, None) .update_active_maker_order(&my_order) .await .error_log_with_msg("!update_active_maker_order"); diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index ab9e3990e9..888df21db0 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -544,7 +544,7 @@ impl SwapsContext { TAKER_SWAP_ENTRY_TIMEOUT_SEC, ))), locked_amounts: Mutex::new(HashMap::new()), - // Using None for db_id here won't matter much since calling `SwapsContext::swap_db(db_id)` will using the provided db_id. + // Using None for db_id here won't matter much since calling `SwapsContext::swap_db(db_id)` will use the provided db_id. 
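// A minimal sketch of the call pattern referred to above, assuming the caller has already
// resolved a `db_id: Option<String>` (for example from a coin's `account_db_id()`);
// `MySwapsFiltersTable` is just one of the swap tables opened this way elsewhere in this series:
//
//     let swaps_ctx = SwapsContext::from_ctx(&ctx).expect("SwapsContext::from_ctx should not fail");
//     let db = swaps_ctx.swap_db(db_id.as_deref()).await?;
//     let transaction = db.transaction().await?;
//     let filters_table = transaction.table::<MySwapsFiltersTable>().await?;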
#[cfg(target_arch = "wasm32")] swap_db: ConstructibleDb::new(ctx, None), }) From 8a2f4a76e7ed2dcc7360bd2afe79389bf7dc0d3a Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 2 May 2024 00:54:51 +0100 Subject: [PATCH 084/186] debug db_id f or TakerOrder --- mm2src/mm2_main/src/lp_ordermatch.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index a3083952e8..403d29a0ca 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -1682,7 +1682,7 @@ impl TakerOrder { fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } async fn db_id(&self, ctx: &MmArc) -> MyOrdersResult> { - lp_coinfind_or_err(ctx, &self.request.base) + lp_coinfind_or_err(ctx, &self.taker_coin_ticker()) .await .mm_err(|err| { MyOrdersError::ErrorSaving(format!("Error finding/deriving wallet pubkey for db_id: {err:?}")) From 4594ff60bd475c7fd3f660b5e490908bd6c7d9f5 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 2 May 2024 12:44:16 +0100 Subject: [PATCH 085/186] fix ordermatch test bug and improve db_id getter --- mm2src/mm2_main/src/lp_ordermatch.rs | 32 +++++++------------ .../src/lp_ordermatch/my_orders_storage.rs | 28 ++++------------ mm2src/mm2_main/src/ordermatch_tests.rs | 29 +++++++++++++++++ 3 files changed, 47 insertions(+), 42 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 403d29a0ca..8ec02cd841 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -25,8 +25,8 @@ use best_orders::BestOrdersAction; use blake2::digest::{Update, VariableOutput}; use blake2::Blake2bVar; use coins::utxo::{compressed_pub_key_from_priv_raw, ChecksumType, UtxoAddressFormat}; -use coins::{coin_conf, find_pair, find_unique_account_ids_active, lp_coinfind, lp_coinfind_or_err, - BalanceTradeFeeUpdatedHandler, CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum}; +use coins::{coin_conf, find_pair, find_unique_account_ids_active, lp_coinfind, BalanceTradeFeeUpdatedHandler, + CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum}; use common::executor::{simple_map::AbortableSimpleMap, AbortSettings, AbortableSystem, AbortedError, SpawnAbortable, SpawnFuture, Timer}; use common::log::{error, info, warn, LogOnError}; @@ -101,8 +101,6 @@ cfg_wasm32! { pub use lp_bot::{start_simple_market_maker_bot, stop_simple_market_maker_bot, StartSimpleMakerBotRequest, TradingBotEvent}; -use self::my_orders_storage::{MyOrdersError, MyOrdersResult}; - #[path = "lp_ordermatch/my_orders_storage.rs"] mod my_orders_storage; #[path = "lp_ordermatch/new_protocol.rs"] mod new_protocol; @@ -1519,6 +1517,7 @@ impl<'a> TakerOrderBuilder<'a> { base_orderbook_ticker: self.base_orderbook_ticker, rel_orderbook_ticker: self.rel_orderbook_ticker, p2p_privkey, + db_id: self.base_coin.account_db_id(), }) } @@ -1559,6 +1558,7 @@ impl<'a> TakerOrderBuilder<'a> { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: self.base_coin.account_db_id(), } } } @@ -1580,6 +1580,7 @@ pub struct TakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) 
p2p_privkey: Option, + db_id: Option, } /// Result of match_reserved function @@ -1681,14 +1682,7 @@ impl TakerOrder { fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } - async fn db_id(&self, ctx: &MmArc) -> MyOrdersResult> { - lp_coinfind_or_err(ctx, &self.taker_coin_ticker()) - .await - .mm_err(|err| { - MyOrdersError::ErrorSaving(format!("Error finding/deriving wallet pubkey for db_id: {err:?}")) - }) - .map(|coin| coin.account_db_id()) - } + fn db_id(&self) -> Option { self.db_id.clone() } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -1720,6 +1714,7 @@ pub struct MakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, + db_id: Option, } pub struct MakerOrderBuilder<'a> { @@ -1975,6 +1970,7 @@ impl<'a> MakerOrderBuilder<'a> { base_orderbook_ticker: self.base_orderbook_ticker, rel_orderbook_ticker: self.rel_orderbook_ticker, p2p_privkey, + db_id: self.base_coin.account_db_id(), }) } @@ -1999,6 +1995,7 @@ impl<'a> MakerOrderBuilder<'a> { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: self.base_coin.account_db_id(), } } } @@ -2107,14 +2104,7 @@ impl MakerOrder { fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } - async fn db_id(&self, ctx: &MmArc) -> MyOrdersResult> { - lp_coinfind_or_err(ctx, &self.base) - .await - .mm_err(|err| { - MyOrdersError::ErrorSaving(format!("Error finding/deriving wallet pubkey for db_id: {err:?}")) - }) - .map(|coin| coin.account_db_id()) - } + fn db_id(&self) -> Option { self.db_id.clone() } } impl From for MakerOrder { @@ -2138,6 +2128,7 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.base_orderbook_ticker, rel_orderbook_ticker: taker_order.rel_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, + db_id: None, // TODO, }, // The "buy" taker order is recreated with reversed pair as Maker order is always considered as "sell" TakerAction::Buy => { @@ -2160,6 +2151,7 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.rel_orderbook_ticker, rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, + db_id: None, // TODO, } }, } diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 96479f4e0c..3707c503c4 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -1,7 +1,7 @@ use super::{MakerOrder, MakerOrderCancellationReason, MyOrdersFilter, Order, RecentOrdersSelectResult, TakerOrder, TakerOrderCancellationReason}; use async_trait::async_trait; -use common::log::{error, warn, LogOnError}; +use common::log::LogOnError; use common::{BoxFut, PagingOptions}; use derive_more::Display; use futures::{FutureExt, TryFutureExt}; @@ -36,8 +36,7 @@ pub enum MyOrdersError { } pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - let db_id = order.db_id(&ctx).await?; - let storage = MyOrdersStorage::new(ctx, db_id); + let storage = MyOrdersStorage::new(ctx, order.db_id()); storage .save_new_active_maker_order(order) .await @@ -50,8 +49,7 @@ pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrders } pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrdersResult<()> { - let db_id = 
order.db_id(&ctx).await?; - let storage = MyOrdersStorage::new(ctx, db_id); + let storage = MyOrdersStorage::new(ctx, order.db_id()); storage .save_new_active_taker_order(order) .await @@ -64,7 +62,7 @@ pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrders } pub async fn save_maker_order_on_update(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - let db_id = order.db_id(&ctx).await?; + let db_id = order.db_id(); let storage = MyOrdersStorage::new(ctx, db_id); storage.update_active_maker_order(order).await?; @@ -80,14 +78,7 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa let uuid = order.request.uuid; let save_in_history = order.save_in_history; - let db_id = match order.db_id(&ctx).await { - Ok(val) => val, - Err(err) => { - error!("{err}"); - None - }, - }; - let storage = MyOrdersStorage::new(ctx, db_id); + let storage = MyOrdersStorage::new(ctx, order.db_id()); storage .delete_active_taker_order(uuid) .await @@ -123,14 +114,7 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa let uuid = order_to_save.uuid; let save_in_history = order_to_save.save_in_history; - let db_id = match order_to_save.db_id(&ctx).await { - Ok(val) => val, - Err(err) => { - warn!("{err}"); - None - }, - }; - let storage = MyOrdersStorage::new(ctx, db_id); + let storage = MyOrdersStorage::new(ctx, order_to_save.db_id()); if order_to_save.was_updated() { if let Ok(order_from_file) = storage.load_active_maker_order(order_to_save.uuid).await { order_to_save = order_from_file; diff --git a/mm2src/mm2_main/src/ordermatch_tests.rs b/mm2src/mm2_main/src/ordermatch_tests.rs index 94d9732260..01ebda2871 100644 --- a/mm2src/mm2_main/src/ordermatch_tests.rs +++ b/mm2src/mm2_main/src/ordermatch_tests.rs @@ -34,6 +34,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { @@ -72,6 +73,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { @@ -110,6 +112,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { @@ -148,6 +151,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { @@ -186,6 +190,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { @@ -224,6 +229,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { @@ -264,6 +270,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { base: "KMD".to_owned(), @@ -304,6 +311,7 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let request = TakerRequest { base: "REL".to_owned(), @@ -375,6 +383,7 @@ fn test_maker_order_available_amount() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; 
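// Note on the `db_id: None` fields added throughout these test fixtures: in the order
// builders the field is populated from the coin (`db_id: self.base_coin.account_db_id()`),
// and order storage then routes records with it (`MyOrdersStorage::new(ctx, order.db_id())`);
// the unit tests leave it as `None`, which presumably falls back to the default database.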
maker.matches.insert(new_uuid(), MakerMatch { request: TakerRequest { @@ -476,6 +485,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -520,6 +530,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -564,6 +575,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -608,6 +620,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -652,6 +665,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -696,6 +710,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -740,6 +755,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -784,6 +800,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -828,6 +845,7 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -875,6 +893,7 @@ fn test_taker_order_cancellable() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; assert!(order.is_cancellable()); @@ -905,6 +924,7 @@ fn test_taker_order_cancellable() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; order.matches.insert(new_uuid(), TakerMatch { @@ -962,6 +982,7 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }, None, ); @@ -984,6 +1005,7 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }, None, ); @@ -1006,6 +1028,7 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }, None, ); @@ -1033,6 +1056,7 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }); rx } @@ -1128,6 +1152,7 @@ fn test_taker_order_match_by() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let reserved = MakerReserved { @@ -1182,6 +1207,7 @@ fn test_maker_order_was_updated() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let mut update_msg = MakerOrderUpdated::new(maker_order.uuid); update_msg.with_new_price(BigRational::from_integer(2.into())); @@ -3181,6 +3207,7 @@ fn test_maker_order_balance_loops() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; let morty_order = MakerOrder { @@ -3200,6 +3227,7 @@ fn test_maker_order_balance_loops() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: 
None, + db_id: None, }; assert!(!maker_orders_ctx.balance_loop_exists(rick_ticker)); @@ -3232,6 +3260,7 @@ fn test_maker_order_balance_loops() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, }; maker_orders_ctx.add_order(ctx.weak(), rick_order_2.clone(), None); From 9ff52de610d35fc8c4fc4ce49f5c14606f94eb04 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 2 May 2024 14:38:49 +0100 Subject: [PATCH 086/186] fix wasm test --- mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 3707c503c4..6fbacbe123 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -736,6 +736,7 @@ mod tests { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, } } @@ -764,6 +765,7 @@ mod tests { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, + db_id: None, } } From 49b926009eaf2cb2811af7449cfcb1bfa9d94c0f Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 2 May 2024 19:25:21 +0100 Subject: [PATCH 087/186] check recent swaps test --- mm2src/mm2_test_helpers/src/for_tests.rs | 71 ++++++++++++------------ 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index f92ba24bd4..843a42697c 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -416,10 +416,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let bob_passphrase = crate::get_passphrase!(".env.seed", "BOB_PASSPHRASE").unwrap(); Mm2TestConf::seednode(&bob_passphrase, coins) - }, + } Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::seednode_with_hd_account(Self::BOB_HD_PASSPHRASE, coins) - }, + } } } @@ -428,10 +428,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let alice_passphrase = crate::get_passphrase!(".env.client", "ALICE_PASSPHRASE").unwrap(); Mm2TestConf::light_node(&alice_passphrase, coins, &[bob_ip]) - }, + } Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::light_node_with_hd_account(Self::ALICE_HD_PASSPHRASE, coins, &[bob_ip]) - }, + } } } } @@ -1139,7 +1139,7 @@ impl RaiiKill { _ => { self.running = false; false - }, + } } } } @@ -1284,7 +1284,7 @@ impl MarketMakerIt { let dir = folder.join("DB"); conf["dbdir"] = dir.to_str().unwrap().into(); dir - }, + } }; try_s!(fs::create_dir(&folder)); @@ -1299,7 +1299,7 @@ impl MarketMakerIt { let path = folder.join("mm2.log"); conf["log"] = path.to_str().unwrap().into(); path - }, + } }; // If `local` is provided @@ -1391,8 +1391,8 @@ impl MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) 
as u64 / 20 + 10); @@ -1418,8 +1418,8 @@ impl MarketMakerIt { /// after process is stopped #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log_after_stop(&self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { use common::try_or_ready_err; @@ -1432,19 +1432,19 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_ms(ms) - .with_timeout_secs(timeout_sec) - .await - .map_err(|e| ERRL!("{:?}", e)) - // Convert `Result, String>` to `Result<(), String>` - .flatten() + .repeat_every_ms(ms) + .with_timeout_secs(timeout_sec) + .await + .map_err(|e| ERRL!("{:?}", e)) + // Convert `Result, String>` to `Result<(), String>` + .flatten() } /// Busy-wait on the instance in-memory log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(target_arch = "wasm32")] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { wait_for_log(&self.ctx, timeout_sec, pred).await } @@ -1468,7 +1468,7 @@ impl MarketMakerIt { let body_str = json::to_string(&body).unwrap_or_else(|_| panic!("Response {:?} is not a valid JSON", body)); Ok((status_code, body_str, HeaderMap::new())) - }, + } Err(e) => Ok((StatusCode::INTERNAL_SERVER_ERROR, e, HeaderMap::new())), } } @@ -1534,7 +1534,7 @@ impl MarketMakerIt { } else { return ERR!("{}", err); } - }, + } }; if status != StatusCode::OK { return ERR!("MM didn't accept a stop. body: {}", body); @@ -1557,10 +1557,10 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_secs(0.05) - .with_timeout_ms(timeout_ms) - .await - .map_err(|e| ERRL!("{:?}", e)) + .repeat_every_secs(0.05) + .with_timeout_ms(timeout_ms) + .await + .map_err(|e| ERRL!("{:?}", e)) } /// Currently, we cannot wait for the `Completed IAmrelay handling for peer` log entry on WASM node, @@ -1665,8 +1665,8 @@ impl Drop for MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. pub async fn wait_for_log(ctx: &MmArc, timeout_sec: f64, pred: F) -> Result<(), String> -where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) 
as u64 / 20 + 10); @@ -1766,7 +1766,7 @@ pub fn mm_spat() -> (&'static str, MarketMakerIt, RaiiDump, RaiiDump) { "pass".into(), None, ) - .unwrap(); + .unwrap(); let (dump_log, dump_dashboard) = mm_dump(&mm.log_path); (passphrase, mm, dump_log, dump_dashboard) } @@ -1843,10 +1843,10 @@ pub fn from_env_file(env: Vec) -> (Option, Option) { match cap.get(1) { Some(name) if name.as_bytes() == b"PASSPHRASE" => { passphrase = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - }, + } Some(name) if name.as_bytes() == b"USERPASS" => { userpass = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - }, + } _ => (), } } @@ -2199,7 +2199,7 @@ pub async fn init_lightning_status(mm: &MarketMakerIt, task_id: u64) -> Json { pub fn new_mm2_temp_folder_path(ip: Option) -> PathBuf { let now = common::now_ms(); #[allow(deprecated)] - let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); + let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); let folder = match ip { Some(ip) => format!("mm2_{}_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f"), ip), None => format!("mm2_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f")), @@ -2436,6 +2436,7 @@ pub async fn check_recent_swaps(mm: &MarketMakerIt, expected_len: usize) { .unwrap(); assert!(response.0.is_success(), "!status of my_recent_swaps {}", response.1); let swaps_response: Json = json::from_str(&response.1).unwrap(); + info!("RECENT SWAPS: {swaps_response:?}"); let swaps: &Vec = swaps_response["result"]["swaps"].as_array().unwrap(); assert_eq!(expected_len, swaps.len()); } @@ -3303,7 +3304,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; log!("Checking maker status.."); check_my_swap_status( @@ -3312,7 +3313,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; } } @@ -3337,8 +3338,8 @@ pub async fn test_qrc20_history_impl(local_start: Option) { "pass".into(), local_start, ) - .await - .unwrap(); + .await + .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); #[cfg(not(target_arch = "wasm32"))] From 6db023650be9ab4098603dd873aa13e15e4df06a Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 3 May 2024 00:30:50 +0100 Subject: [PATCH 088/186] debug check_my_reecent_swaps rpc --- mm2src/mm2_test_helpers/src/for_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 843a42697c..4544c337aa 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -2437,7 +2437,7 @@ pub async fn check_recent_swaps(mm: &MarketMakerIt, expected_len: usize) { assert!(response.0.is_success(), "!status of my_recent_swaps {}", response.1); let swaps_response: Json = json::from_str(&response.1).unwrap(); info!("RECENT SWAPS: {swaps_response:?}"); - let swaps: &Vec = swaps_response["result"]["swaps"].as_array().unwrap(); + let swaps: &Vec = swaps_response["result"][0]["swaps"].as_array().unwrap(); assert_eq!(expected_len, swaps.len()); } From f970a1212a860baccb8a556f6230533b280f0880 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 3 May 2024 00:46:51 +0100 Subject: [PATCH 089/186] remove debug line --- mm2src/mm2_test_helpers/src/for_tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git 
a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 4544c337aa..ddddb3e845 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -2436,7 +2436,6 @@ pub async fn check_recent_swaps(mm: &MarketMakerIt, expected_len: usize) { .unwrap(); assert!(response.0.is_success(), "!status of my_recent_swaps {}", response.1); let swaps_response: Json = json::from_str(&response.1).unwrap(); - info!("RECENT SWAPS: {swaps_response:?}"); let swaps: &Vec = swaps_response["result"][0]["swaps"].as_array().unwrap(); assert_eq!(expected_len, swaps.len()); } From 094a65931170302f64fc2e62bcdf56cbafed218e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 3 May 2024 12:50:24 +0100 Subject: [PATCH 090/186] update swaplock with db_id --- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 157 +++++------ mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 189 +++++++------- mm2src/mm2_main/src/lp_swap/swap_lock.rs | 46 ++-- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 32 +-- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 204 ++++++++------- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 243 ++++++++---------- 6 files changed, 428 insertions(+), 443 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index b8247254ec..19232bc111 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -294,7 +294,7 @@ impl MakerSwap { fmt = "Maker swap {} has successfully started", self.uuid ); - }, + } MakerSwapEvent::StartFailed(err) => self.errors.lock().push(err), MakerSwapEvent::Negotiated(data) => { self.taker_payment_lock @@ -308,11 +308,11 @@ impl MakerSwap { if data.taker_coin_swap_contract_addr.is_some() { self.w().data.taker_coin_swap_contract_address = data.taker_coin_swap_contract_addr; } - }, + } MakerSwapEvent::NegotiateFailed(err) => self.errors.lock().push(err), MakerSwapEvent::MakerPaymentInstructionsReceived(instructions) => { self.w().payment_instructions = instructions - }, + } MakerSwapEvent::TakerFeeValidated(tx) => self.w().taker_fee = Some(tx), MakerSwapEvent::TakerFeeValidateFailed(err) => self.errors.lock().push(err), MakerSwapEvent::MakerPaymentSent(tx) => self.w().maker_payment = Some(tx), @@ -323,7 +323,7 @@ impl MakerSwap { MakerSwapEvent::TakerPaymentWaitConfirmStarted => (), MakerSwapEvent::TakerPaymentValidatedAndConfirmed => { self.taker_payment_confirmed.store(true, Ordering::Relaxed) - }, + } MakerSwapEvent::TakerPaymentValidateFailed(err) => self.errors.lock().push(err), MakerSwapEvent::TakerPaymentWaitConfirmFailed(err) => self.errors.lock().push(err), MakerSwapEvent::TakerPaymentSpent(tx) => self.w().taker_payment_spend = Some(tx), @@ -488,8 +488,8 @@ impl MakerSwap { Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!maker_coin.get_sender_trade_fee {}", e).into(), - )])) - }, + )])); + } }; let taker_payment_spend_trade_fee_fut = self.taker_coin.get_receiver_trade_fee(stage); let taker_payment_spend_trade_fee = match taker_payment_spend_trade_fee_fut.compat().await { @@ -497,8 +497,8 @@ impl MakerSwap { Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!taker_coin.get_receiver_trade_fee {}", e).into(), - )])) - }, + )])); + } }; let params = MakerSwapPreparedParams { @@ -514,14 +514,14 @@ impl MakerSwap { Some(params), stage, ) - .await + .await { Ok(_) => (), Err(e) => { return Ok((Some(MakerSwapCommand::Finish), 
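// [Editorial illustration, not part of the patch] The check_recent_swaps change above
// switches the test from indexing ["result"]["swaps"] to ["result"][0]["swaps"], which
// suggests my_recent_swaps now returns an array of per-account result objects rather
// than a single object. The exact response shape is an assumption inferred from that
// one-line test change; a toy, self-contained sketch of the new indexing:
use serde_json::{json, Value};

fn main() {
    let response: Value = json!({
        "result": [
            { "swaps": [ { "uuid": "..." } ] }  // one entry per account/db_id (assumed)
        ]
    });
    let swaps = response["result"][0]["swaps"].as_array().unwrap();
    assert_eq!(swaps.len(), 1);
}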
vec![MakerSwapEvent::StartFailed( ERRL!("!check_balance_for_maker_swap {}", e).into(), - )])) - }, + )])); + } }; let started_at = now_sec(); @@ -530,8 +530,8 @@ impl MakerSwap { Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!maker_coin.current_block {}", e).into(), - )])) - }, + )])); + } }; let taker_coin_start_block = match self.taker_coin.current_block().compat().await { @@ -539,8 +539,8 @@ impl MakerSwap { Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!taker_coin.current_block {}", e).into(), - )])) - }, + )])); + } }; let maker_coin_swap_contract_address = self.maker_coin.swap_contract_address(); @@ -613,7 +613,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::NegotiateFailed( ERRL!("{:?}", e).into(), )])); - }, + } }; drop(send_abort_handle); let time_dif = self.r().data.started_at.abs_diff(taker_data.started_at()); @@ -633,7 +633,7 @@ impl MakerSwap { taker_data.payment_locktime(), expected_lock_time ) - .into(), + .into(), )])); } @@ -647,7 +647,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::NegotiateFailed( ERRL!("!maker_coin.negotiate_swap_contract_addr {}", e).into(), )])); - }, + } }; let taker_coin_swap_contract_addr = match self @@ -660,7 +660,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::NegotiateFailed( ERRL!("!taker_coin.negotiate_swap_contract_addr {}", e).into(), )])); - }, + } }; // Validate maker_coin_htlc_pubkey realness @@ -715,8 +715,8 @@ impl MakerSwap { Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::TakerFeeValidateFailed(ERRL!("{}", e).into()), - ])) - }, + ])); + } }; drop(send_abort_handle); @@ -738,9 +738,9 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::TakerFeeValidateFailed(e.to_string().into()), ])); - }, + } } - }, + } None => None, }; swap_events.push(MakerSwapEvent::MakerPaymentInstructionsReceived(instructions)); @@ -750,8 +750,8 @@ impl MakerSwap { Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::TakerFeeValidateFailed(ERRL!("{:?}", e).into()), - ])) - }, + ])); + } }; let hash = taker_fee.tx_hash(); @@ -787,7 +787,7 @@ impl MakerSwap { attempts += 1; Timer::sleep(10.).await; } - }, + } }; } @@ -837,8 +837,8 @@ impl MakerSwap { Err(err) => { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::MakerPaymentTransactionFailed(err.into_inner().to_string().into()), - ])) - }, + ])); + } } } else { None @@ -869,15 +869,15 @@ impl MakerSwap { ERRL!("{}", err.get_plain_text_format()).into(), ), ])); - }, + } } - }, + } }, Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::MakerPaymentTransactionFailed(ERRL!("{}", e).into()), - ])) - }, + ])); + } }; let tx_hash = transaction.tx_hash(); @@ -904,7 +904,7 @@ impl MakerSwap { wait_until: self.wait_refund_until(), }, ])); - }, + } }; let msg = SwapMsg::MakerPayment(payment_data_msg); let abort_send_handle = broadcast_swap_msg_every( @@ -954,8 +954,8 @@ impl MakerSwap { MakerSwapEvent::MakerPaymentWaitRefundStarted { wait_until: self.wait_refund_until(), }, - ])) - }, + ])); + } }; drop(abort_send_handle); @@ -969,8 +969,8 @@ impl MakerSwap { MakerSwapEvent::MakerPaymentWaitRefundStarted { wait_until: self.wait_refund_until(), }, - ])) - }, + ])); + } }; let tx_hash = taker_payment.tx_hash(); @@ -1029,8 +1029,8 @@ impl MakerSwap { Err(err) => { return 
Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::MakerPaymentTransactionFailed(err.into_inner().to_string().into()), - ])) - }, + ])); + } } } else { None @@ -1118,13 +1118,13 @@ impl MakerSwap { "!taker_coin.send_maker_spends_taker_payment: {}", err.get_plain_text_format() ) - .into(), + .into(), ), MakerSwapEvent::MakerPaymentWaitRefundStarted { wait_until: self.wait_refund_until(), }, ])); - }, + } }; broadcast_p2p_tx_msg( @@ -1221,7 +1221,7 @@ impl MakerSwap { Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - }, + } } } @@ -1261,10 +1261,10 @@ impl MakerSwap { "!maker_coin.send_maker_refunds_payment: {}", err.get_plain_text_format() ) - .into(), + .into(), ), ])); - }, + } }; broadcast_p2p_tx_msg( @@ -1417,15 +1417,15 @@ impl MakerSwap { "Taker payment was already spent by {} tx {:02x}", selfi.taker_coin.ticker(), tx.tx_hash() - ) - }, + ); + } Ok(Some(FoundSwapTxSpend::Refunded(tx))) => { return ERR!( "Taker payment was already refunded by {} tx {:02x}", selfi.taker_coin.ticker(), tx.tx_hash() - ) - }, + ); + } Err(e) => return ERR!("Error {} when trying to find taker payment spend", e), Ok(None) => (), // payment is not spent, continue } @@ -1493,7 +1493,7 @@ impl MakerSwap { Some(tx) => tx.tx_hex(), None => return ERR!("Maker payment transaction was not found"), } - }, + } }; let search_input = SearchForSwapTxSpendInput { @@ -1517,7 +1517,7 @@ impl MakerSwap { coin: self.taker_coin.ticker().to_string(), transaction, }) - }, + } Ok(Some(FoundSwapTxSpend::Refunded(tx))) => ERR!( "Maker payment was already refunded by {} tx {:02x}", self.maker_coin.ticker(), @@ -1558,7 +1558,7 @@ impl MakerSwap { } return ERR!("{}", err.get_plain_text_format()); - }, + } }; Ok(RecoveredSwap { @@ -1566,7 +1566,7 @@ impl MakerSwap { coin: self.maker_coin.ticker().to_string(), transaction, }) - }, + } } } } @@ -1675,14 +1675,14 @@ impl MakerSwapEvent { MakerSwapEvent::MakerPaymentDataSendFailed(_) => "Maker payment failed...".to_owned(), MakerSwapEvent::MakerPaymentWaitConfirmFailed(_) => { "Maker payment wait for confirmation failed...".to_owned() - }, + } MakerSwapEvent::TakerPaymentReceived(_) => "Taker payment received...".to_owned(), MakerSwapEvent::TakerPaymentWaitConfirmStarted => "Taker payment wait confirm started...".to_owned(), MakerSwapEvent::TakerPaymentValidatedAndConfirmed => "Taker payment validated and confirmed...".to_owned(), MakerSwapEvent::TakerPaymentValidateFailed(_) => "Taker payment validate failed...".to_owned(), MakerSwapEvent::TakerPaymentWaitConfirmFailed(_) => { "Taker payment wait for confirmation failed...".to_owned() - }, + } MakerSwapEvent::TakerPaymentSpent(_) => "Taker payment spent...".to_owned(), MakerSwapEvent::TakerPaymentSpendFailed(_) => "Taker payment spend failed...".to_owned(), MakerSwapEvent::TakerPaymentSpendConfirmStarted => "Taker payment send wait confirm started...".to_owned(), @@ -1690,7 +1690,7 @@ impl MakerSwapEvent { MakerSwapEvent::TakerPaymentSpendConfirmFailed(_) => "Taker payment spend confirm failed...".to_owned(), MakerSwapEvent::MakerPaymentWaitRefundStarted { wait_until } => { format!("Maker payment wait refund till {} started...", wait_until) - }, + } MakerSwapEvent::MakerPaymentRefundStarted => "Maker payment refund started...".to_owned(), MakerSwapEvent::MakerPaymentRefunded(_) => "Maker payment refunded...".to_owned(), MakerSwapEvent::MakerPaymentRefundFailed(_) => "Maker payment refund failed...".to_owned(), @@ -1760,7 +1760,7 @@ impl MakerSavedEvent { 
MakerSwapEvent::TakerPaymentSpendConfirmFailed(_) => Some(MakerSwapCommand::PrepareForMakerPaymentRefund), MakerSwapEvent::MakerPaymentWaitRefundStarted { .. } => { Some(MakerSwapCommand::PrepareForMakerPaymentRefund) - }, + } MakerSwapEvent::MakerPaymentRefundStarted => Some(MakerSwapCommand::RefundMakerPayment), MakerSwapEvent::MakerPaymentRefunded(_) => Some(MakerSwapCommand::FinalizeMakerPaymentRefund), MakerSwapEvent::MakerPaymentRefundFailed(_) => Some(MakerSwapCommand::Finish), @@ -1929,7 +1929,7 @@ impl MakerSavedSwap { | MakerSwapEvent::TakerPaymentSpendConfirmed | MakerSwapEvent::MakerPaymentRefunded(_) => { return false; - }, + } _ => (), } } @@ -1995,7 +1995,7 @@ impl MakerSavedSwap { return ERR!("taker's pubkey is empty"); }; key.to_string() - }, + } _ => return ERR!("Swap must be negotiated to get taker's pubkey"), }, None => return ERR!("Can't get taker's pubkey while there's no Negotiated event"), @@ -2031,6 +2031,13 @@ impl RunMakerSwapInput { RunMakerSwapInput::KickStart { swap_uuid, .. } => swap_uuid, } } + + fn db_id(&self) -> Option { + match self { + RunMakerSwapInput::StartNew(swap) => swap.db_id(), + RunMakerSwapInput::KickStart { maker_coin, .. } => maker_coin.account_db_id(), + } + } } /// Starts the maker swap and drives it to completion (until None next command received). @@ -2039,9 +2046,10 @@ impl RunMakerSwapInput { /// Every produced event is saved to local DB. Swap status is broadcasted to P2P network after completion. pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { let uuid = swap.uuid().to_owned(); + let db_id = swap.db_id().to_owned(); let mut attempts = 0; let swap_lock = loop { - match SwapLock::lock(&ctx, uuid, 40.).await { + match SwapLock::lock(&ctx, uuid, 40., db_id.as_deref()).await { Ok(Some(l)) => break l, Ok(None) => { if attempts >= 1 { @@ -2054,11 +2062,11 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { attempts += 1; Timer::sleep(40.).await; } - }, + } Err(e) => { error!("Swap {} file lock error: {}", uuid, e); return; - }, + } }; }; @@ -2073,16 +2081,16 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { Some(c) => { info!("Swap {} kick started.", uuid); (swap, c) - }, + } None => { warn!("Swap {} has been finished already, aborting.", uuid); return; - }, + } }, Err(e) => { error!("Error loading swap {}: {}", uuid, e); return; - }, + } }, }; @@ -2096,7 +2104,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { Timer::sleep(30.).await; } } - .fuse(), + .fuse(), ); let ctx = swap.ctx.clone(); @@ -2157,7 +2165,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { match res.0 { Some(c) => { command = c; - }, + } None => { if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid, running_swap.db_id().as_deref()).await @@ -2172,16 +2180,17 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { } } break; - }, + } } } } - .fuse(), + .fuse(), ); select! 
{ _swap = swap_fut => (), // swap finished normally _touch = touch_loop => unreachable!("Touch loop can not stop!"), - }; + } + ; } pub struct MakerSwapPreparedParams { @@ -2200,9 +2209,9 @@ pub async fn check_balance_for_maker_swap( ) -> CheckBalanceResult { let (maker_payment_trade_fee, taker_payment_spend_trade_fee) = match prepared_params { Some(MakerSwapPreparedParams { - maker_payment_trade_fee, - taker_payment_spend_trade_fee, - }) => (maker_payment_trade_fee, taker_payment_spend_trade_fee), + maker_payment_trade_fee, + taker_payment_spend_trade_fee, + }) => (maker_payment_trade_fee, taker_payment_spend_trade_fee), None => { let preimage_value = TradePreimageValue::Exact(volume.to_decimal()); let maker_payment_trade_fee = my_coin @@ -2215,7 +2224,7 @@ pub async fn check_balance_for_maker_swap( .await .mm_err(|e| CheckBalanceError::from_trade_preimage_error(e, other_coin.ticker()))?; (maker_payment_trade_fee, taker_payment_spend_trade_fee) - }, + } }; let balance = @@ -2287,7 +2296,7 @@ pub async fn maker_swap_trade_preimage( Some(prepared_params), FeeApproxStage::TradePreimage, ) - .await?; + .await?; } let conf_settings = OrderConfirmationsSettings { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index a8daa4d4f3..c9b75eb915 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -43,7 +43,8 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] use prost::Message; +#[allow(unused_imports)] +use prost::Message; /// Negotiation data representation to be stored in DB. #[derive(Clone, Debug, Deserialize, Serialize)] @@ -180,7 +181,7 @@ impl StateMachineStorage for MakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -225,7 +226,7 @@ impl StateMachineStorage for MakerSwapStorage { MakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -397,7 +398,7 @@ pub struct MakerSwapStateMachine - MakerSwapStateMachine +MakerSwapStateMachine { /// Timeout for taker payment's on-chain confirmation. #[inline] @@ -422,7 +423,7 @@ impl StorableStateMachine - for MakerSwapStateMachine +for MakerSwapStateMachine { type Storage = MakerSwapStorage; type Result = (); @@ -462,12 +463,12 @@ impl Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { MakerSwapEvent::Initialized { maker_coin_start_block, @@ -620,7 +621,7 @@ impl return MmError::err(SwapRecreateError::SwapCompleted), MakerSwapEvent::MakerPaymentRefunded { .. 
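// [Editorial illustration, not part of the patch] Sketch of the db_id resolution that
// run_maker_swap gains above: a newly started swap uses the db_id recorded on the swap,
// a kick-started swap falls back to the maker coin's account_db_id(), and the resolved
// Option<String> is handed to SwapLock::lock as Option<&str>. The types below are
// simplified stand-ins, not the real MakerSwap/MmCoinEnum types:
struct Swap { db_id: Option<String> }
struct Coin { account_db_id: Option<String> }

enum RunSwapInput {
    StartNew(Swap),
    KickStart { coin: Coin, swap_uuid: String },
}

impl RunSwapInput {
    fn db_id(&self) -> Option<String> {
        match self {
            RunSwapInput::StartNew(swap) => swap.db_id.clone(),
            RunSwapInput::KickStart { coin, .. } => coin.account_db_id.clone(),
        }
    }
}

fn swap_lock_name(input: &RunSwapInput, uuid: &str) -> String {
    // The real code passes db_id.as_deref() into SwapLock::lock; treating None as the
    // default/global database is an assumption of this sketch.
    match input.db_id().as_deref() {
        Some(db_id) => format!("{}/{}.lock", db_id, uuid),
        None => format!("{}.lock", uuid),
    }
}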
} => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - }, + } }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -658,7 +659,7 @@ impl Result { - acquire_reentrancy_lock_impl(&self.ctx, self.uuid).await + acquire_reentrancy_lock_impl(&self.ctx, self.uuid, self.maker_coin.account_db_id().as_deref()).await } fn spawn_reentrancy_lock_renew(&mut self, guard: Self::ReentrancyLock) { @@ -708,14 +709,14 @@ impl { let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let ticker = self.maker_coin.ticker(); if let Some(maker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { maker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); }; - }, + } MakerSwapEvent::WaitingForTakerFunding { .. } | MakerSwapEvent::TakerFundingReceived { .. } | MakerSwapEvent::MakerPaymentRefundRequired { .. } @@ -761,7 +762,7 @@ impl Default for Initialize { } impl InitialState - for Initialize +for Initialize { type StateMachine = MakerSwapStateMachine; } #[async_trait] impl State - for Initialize +for Initialize { type StateMachine = MakerSwapStateMachine; @@ -806,7 +807,7 @@ impl { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -814,7 +815,7 @@ impl { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let preimage_value = TradePreimageValue::Exact(state_machine.maker_volume.to_decimal()); @@ -828,7 +829,7 @@ impl { let reason = AbortReason::FailedToGetMakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_payment_spend_trade_fee = match state_machine.taker_coin.get_receiver_trade_fee(stage).compat().await @@ -837,7 +838,7 @@ impl { let reason = AbortReason::FailedToGetTakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let prepared_params = MakerSwapPreparedParams { @@ -854,7 +855,7 @@ impl { impl TransitionFrom> for Initialized {} impl StorableState - for Initialized +for Initialized { type StateMachine = MakerSwapStateMachine; @@ -901,7 +902,7 @@ impl State - for Initialized +for Initialized { type StateMachine = MakerSwapStateMachine; @@ -942,7 +943,7 @@ impl { let reason = AbortReason::DidNotReceiveTakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; drop(abort_handle); @@ -952,11 +953,11 @@ impl { let reason = AbortReason::TakerAbortedNegotiation(abort.reason); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } None => { let reason = AbortReason::ReceivedInvalidTakerNegotiation; return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let started_at_diff = state_machine.started_at.abs_diff(taker_data.started_at); @@ -983,7 +984,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let maker_coin_htlc_pub_from_taker = @@ -992,7 +993,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = WaitingForTakerFunding { @@ -1065,13 +1066,12 @@ struct WaitingForTakerFunding TransitionFrom> - for 
WaitingForTakerFunding -{ -} +for WaitingForTakerFunding +{} #[async_trait] impl State - for WaitingForTakerFunding +for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1104,7 +1104,7 @@ impl { let reason = AbortReason::DidNotReceiveTakerFundingInfo(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; drop(abort_handle); @@ -1114,7 +1114,7 @@ impl { let reason = AbortReason::FailedToParseTakerFunding(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = TakerFundingReceived { maker_coin_start_block: self.maker_coin_start_block, @@ -1128,7 +1128,7 @@ impl StorableState - for WaitingForTakerFunding +for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1151,13 +1151,12 @@ struct TakerFundingReceived - TransitionFrom> for TakerFundingReceived -{ -} +TransitionFrom> for TakerFundingReceived +{} #[async_trait] impl State - for TakerFundingReceived +for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1198,7 +1197,7 @@ impl { let reason = AbortReason::FailedToGenerateFundingSpend(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let args = SendMakerPaymentArgs { @@ -1214,7 +1213,7 @@ impl { let reason = AbortReason::FailedToSendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; info!( "Sent maker payment {} tx {:02x} during swap {}", @@ -1236,7 +1235,7 @@ impl StorableState - for TakerFundingReceived +for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1264,14 +1263,13 @@ struct MakerPaymentSentFundingSpendGenerated - TransitionFrom> - for MakerPaymentSentFundingSpendGenerated -{ -} +TransitionFrom> +for MakerPaymentSentFundingSpendGenerated +{} #[async_trait] impl State - for MakerPaymentSentFundingSpendGenerated +for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1328,7 +1326,7 @@ impl { @@ -1341,7 +1339,7 @@ impl { let next_state = MakerPaymentRefundRequired { maker_coin_start_block: self.maker_coin_start_block, @@ -1352,15 +1350,15 @@ impl { Timer::sleep(30.).await; - }, + } Err(e) => match e { SearchForFundingSpendErr::Rpc(e) => { error!("Rpc error {} on search_for_taker_funding_spend", e); Timer::sleep(30.).await; - }, + } // Other error cases are considered irrecoverable, so we should proceed to refund stage // handling using @ binding to trigger a compiler error when new variant is added e @ SearchForFundingSpendErr::InvalidInputTx(_) @@ -1375,7 +1373,7 @@ impl StorableState - for MakerPaymentSentFundingSpendGenerated +for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1433,19 +1431,17 @@ struct MakerPaymentRefundRequired - TransitionFrom> - for MakerPaymentRefundRequired -{ -} +TransitionFrom> +for MakerPaymentRefundRequired +{} impl - TransitionFrom> for MakerPaymentRefundRequired -{ -} +TransitionFrom> for MakerPaymentRefundRequired +{} #[async_trait] impl State - for MakerPaymentRefundRequired +for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1471,7 +1467,7 @@ impl { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = MakerPaymentRefunded { @@ -1496,7 +1492,7 @@ impl { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - 
}, + } } } @@ -1526,7 +1522,7 @@ impl { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = MakerPaymentRefunded { @@ -1541,7 +1537,7 @@ impl StorableState - for MakerPaymentRefundRequired +for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1568,14 +1564,13 @@ struct TakerPaymentReceived - TransitionFrom> - for TakerPaymentReceived -{ -} +TransitionFrom> +for TakerPaymentReceived +{} #[async_trait] impl State - for TakerPaymentReceived +for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1616,7 +1611,7 @@ impl s, @@ -1659,7 +1654,7 @@ impl StorableState - for TakerPaymentReceived +for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1748,13 +1743,12 @@ struct TakerPaymentSpent - TransitionFrom> for TakerPaymentSpent -{ -} +TransitionFrom> for TakerPaymentSpent +{} #[async_trait] impl State - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1764,7 +1758,7 @@ impl StorableState - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1829,7 +1823,7 @@ impl Aborted { #[async_trait] impl LastState - for Aborted +for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1842,7 +1836,7 @@ impl StorableState - for Aborted +for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1858,19 +1852,16 @@ impl TransitionFrom> for impl TransitionFrom> for Aborted {} impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} struct Completed { maker_coin: PhantomData, @@ -1887,7 +1878,7 @@ impl Completed { } impl StorableState - for Completed +for Completed { type StateMachine = MakerSwapStateMachine; @@ -1896,7 +1887,7 @@ impl LastState - for Completed +for Completed { type StateMachine = MakerSwapStateMachine; @@ -1909,9 +1900,8 @@ impl - TransitionFrom> for Completed -{ -} +TransitionFrom> for Completed +{} struct MakerPaymentRefunded { taker_coin: PhantomData, @@ -1921,7 +1911,7 @@ struct MakerPaymentRefunded { } impl StorableState - for MakerPaymentRefunded +for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1942,7 +1932,7 @@ impl LastState - for MakerPaymentRefunded +for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1958,6 +1948,5 @@ impl - TransitionFrom> for MakerPaymentRefunded -{ -} +TransitionFrom> for MakerPaymentRefunded +{} diff --git a/mm2src/mm2_main/src/lp_swap/swap_lock.rs b/mm2src/mm2_main/src/lp_swap/swap_lock.rs index 29dbf8f214..fe32c35843 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_lock.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_lock.rs @@ -4,10 +4,12 @@ use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; use uuid::Uuid; -#[cfg(target_arch = "wasm32")] use common::now_sec; +#[cfg(target_arch = "wasm32")] +use common::now_sec; #[cfg(not(target_arch = "wasm32"))] pub use native_lock::SwapLock; -#[cfg(target_arch = "wasm32")] pub use wasm_lock::SwapLock; +#[cfg(target_arch = "wasm32")] +pub use wasm_lock::SwapLock; pub type SwapLockResult = Result>; @@ -24,7 +26,7 @@ pub enum SwapLockError { #[async_trait] pub trait SwapLockOps: Sized { - async fn lock(ctx: &MmArc, swap_uuid: Uuid, ttl_sec: f64) -> SwapLockResult>; + async fn lock(ctx: &MmArc, swap_uuid: Uuid, ttl_sec: f64, db_id: 
Option<&str>) -> SwapLockResult>; async fn touch(&self) -> SwapLockResult<()>; } @@ -41,11 +43,11 @@ mod native_lock { match e { FileLockError::ErrorReadingTimestamp { path, error } => { SwapLockError::ErrorReadingTimestamp(format!("Path: {:?}, Error: {}", path, error)) - }, + } FileLockError::ErrorWritingTimestamp { path, error } | FileLockError::ErrorCreatingLockFile { path, error } => { SwapLockError::ErrorWritingTimestamp(format!("Path: {:?}, Error: {}", path, error)) - }, + } } } } @@ -56,8 +58,8 @@ mod native_lock { #[async_trait] impl SwapLockOps for SwapLock { - async fn lock(ctx: &MmArc, swap_uuid: Uuid, ttl_sec: f64) -> SwapLockResult> { - let lock_path = my_swaps_dir(ctx, None).join(format!("{}.lock", swap_uuid)); + async fn lock(ctx: &MmArc, swap_uuid: Uuid, ttl_sec: f64, db_id: Option<&str>) -> SwapLockResult> { + let lock_path = my_swaps_dir(ctx, db_id).join(format!("{}.lock", swap_uuid)); let file_lock = some_or_return_ok_none!(FileLock::lock(lock_path, ttl_sec)?); Ok(Some(SwapLock { file_lock })) @@ -94,7 +96,7 @@ mod wasm_lock { | e @ DbTransactionError::ErrorCountingItems(_) => SwapLockError::ErrorReadingTimestamp(e.to_string()), e @ DbTransactionError::ErrorDeletingItems(_) | e @ DbTransactionError::ErrorUploadingItem(_) => { SwapLockError::ErrorWritingTimestamp(e.to_string()) - }, + } } } } @@ -108,14 +110,16 @@ mod wasm_lock { swap_uuid: Uuid, /// The identifier of the timestamp record in the `SwapLockTable`. pub(super) record_id: ItemId, + db_id: Option, } impl Drop for SwapLock { fn drop(&mut self) { let ctx = self.ctx.clone(); let record_id = self.record_id; + let db_id = self.db_id.to_owned(); let fut = async move { - if let Err(e) = Self::release(ctx, record_id).await { + if let Err(e) = Self::release(ctx, record_id, db_id.as_deref()).await { error!("Error realising the SwapLock: {}", e); } debug!("SwapLock::drop] Finish"); @@ -126,10 +130,9 @@ mod wasm_lock { #[async_trait] impl SwapLockOps for SwapLock { - async fn lock(ctx: &MmArc, uuid: Uuid, ttl_sec: f64) -> SwapLockResult> { + async fn lock(ctx: &MmArc, uuid: Uuid, ttl_sec: f64, db_id: Option<&str>) -> SwapLockResult> { let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SwapLockError::InternalError)?; - // TODO: db_id - let db = swaps_ctx.swap_db(None).await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -154,13 +157,13 @@ mod wasm_lock { ctx: ctx.clone(), swap_uuid: uuid, record_id, + db_id: db_id.map(|s| s.to_string()), })) } async fn touch(&self) -> SwapLockResult<()> { let swaps_ctx = SwapsContext::from_ctx(&self.ctx).map_to_mm(SwapLockError::InternalError)?; - // TODO: db_id - let db = swaps_ctx.swap_db(None).await?; + let db = swaps_ctx.swap_db(self.db_id.as_deref()).await?; let item = SwapLockTable { uuid: self.swap_uuid, @@ -181,10 +184,9 @@ mod wasm_lock { } impl SwapLock { - async fn release(ctx: MmArc, record_id: ItemId) -> SwapLockResult<()> { + async fn release(ctx: MmArc, record_id: ItemId, db_id: Option<&str>) -> SwapLockResult<()> { let swaps_ctx = SwapsContext::from_ctx(&ctx).map_to_mm(SwapLockError::InternalError)?; - // TODO: db_id - let db = swaps_ctx.swap_db(None).await?; + let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.delete_item(record_id).await?; @@ -222,7 +224,7 @@ mod tests { let uuid = new_uuid(); let started_at = now_sec(); - let swap_lock = SwapLock::lock(&ctx, uuid, 10.) 
+ let swap_lock = SwapLock::lock(&ctx, uuid, 10., None) .await .expect("!SwapLock::lock") .expect("SwapLock::lock must return a value"); @@ -249,11 +251,11 @@ mod tests { async fn test_file_lock_should_return_none_if_lock_acquired() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); let uuid = new_uuid(); - let _lock = SwapLock::lock(&ctx, uuid, 10.) + let _lock = SwapLock::lock(&ctx, uuid, 10., None) .await .expect("!SwapLock::lock") .expect("SwapLock::lock must return a value"); - let new_lock = SwapLock::lock(&ctx, uuid, 10.).await.expect("!SwapLock::lock"); + let new_lock = SwapLock::lock(&ctx, uuid, 10., None).await.expect("!SwapLock::lock"); assert!( new_lock.is_none(), "SwapLock::lock must return None if the lock has already been acquired" @@ -266,7 +268,7 @@ mod tests { let uuid = new_uuid(); let started_at = now_sec(); - let first_lock = SwapLock::lock(&ctx, uuid, 1.) + let first_lock = SwapLock::lock(&ctx, uuid, 1., None) .await .expect("!SwapLock::lock") .expect("SwapLock::lock must return a value"); @@ -281,7 +283,7 @@ mod tests { Timer::sleep(2.).await; - let second_lock = SwapLock::lock(&ctx, uuid, 1.) + let second_lock = SwapLock::lock(&ctx, uuid, 1., None) .await .expect("!SwapLock::lock") .expect("SwapLock::lock must return a value after the last ttl is over"); diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index ad4bf22636..6412d6f944 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -111,7 +111,7 @@ pub(super) async fn has_db_record_for( let conn = conn.lock().unwrap(); does_swap_exist(&conn, &id_str, db_id.as_deref()) }) - .await?) + .await?) } #[cfg(target_arch = "wasm32")] @@ -135,8 +135,8 @@ pub(super) async fn store_swap_event( event: T::Event, db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> -where - T::Event: DeserializeOwned + Serialize + Send + 'static, + where + T::Event: DeserializeOwned + Serialize + Send + 'static, { let id_str = id.to_string(); let db_id = db_id.map(|e| e.to_string()); @@ -154,7 +154,7 @@ where update_swap_events(&conn, &id_str, &serialized_events, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -218,7 +218,7 @@ pub(super) async fn get_unfinished_swaps_uuids( select_unfinished_swaps_uuids(&conn, swap_type, db_id.as_deref()) .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -251,7 +251,7 @@ pub(super) async fn mark_swap_as_finished( let conn = conn.lock().unwrap(); Ok(set_swap_is_finished(&conn, &id.to_string(), db_id.as_deref())?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -302,10 +302,10 @@ pub(super) fn clean_up_context_impl(ctx: &MmArc, uuid: &Uuid, maker_coin: &str, } } -pub(super) async fn acquire_reentrancy_lock_impl(ctx: &MmArc, uuid: Uuid) -> MmResult { +pub(super) async fn acquire_reentrancy_lock_impl(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> MmResult { let mut attempts = 0; loop { - match SwapLock::lock(ctx, uuid, 40.).await? { + match SwapLock::lock(ctx, uuid, 40., db_id).await? { Some(l) => break Ok(l), None => { if attempts >= 1 { @@ -315,7 +315,7 @@ pub(super) async fn acquire_reentrancy_lock_impl(ctx: &MmArc, uuid: Uuid) -> MmR attempts += 1; Timer::sleep(40.).await; } - }, + } } } } @@ -342,7 +342,7 @@ pub(super) trait GetSwapCoins { /// Generic function for upgraded swaps kickstart handling. 
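// [Editorial illustration, not part of the patch] The SwapLockOps change above scopes
// swap reentrancy locks by db_id: the native lock file is created under the per-account
// swaps directory, and the wasm lock keeps the db_id so touch/release address the right
// IndexedDB instance. A toy, self-contained sketch of the core idea that the lock
// identity is now (db_id, uuid) rather than uuid alone (not the real implementation):
use std::collections::HashSet;

#[derive(Default)]
struct LockRegistry {
    held: HashSet<(Option<String>, String)>,
}

impl LockRegistry {
    /// Returns false if the same swap is already locked for this account database.
    fn try_lock(&mut self, db_id: Option<&str>, uuid: &str) -> bool {
        self.held.insert((db_id.map(String::from), uuid.to_string()))
    }

    fn release(&mut self, db_id: Option<&str>, uuid: &str) {
        self.held.remove(&(db_id.map(String::from), uuid.to_string()));
    }
}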
/// It is implemented only for UtxoStandardCoin/UtxoStandardCoin case temporary. pub(super) async fn swap_kickstart_handler< - T: StorableStateMachine>, + T: StorableStateMachine>, >( ctx: MmArc, swap_repr: ::DbRepr, @@ -365,11 +365,11 @@ pub(super) async fn swap_kickstart_handler< uuid, taker_coin_ticker, ); Timer::sleep(1.).await; - }, + } Err(e) => { error!("Error {} on {} find attempt", e, taker_coin_ticker); return; - }, + } }; }; @@ -384,11 +384,11 @@ pub(super) async fn swap_kickstart_handler< uuid, maker_coin_ticker, ); Timer::sleep(1.).await; - }, + } Err(e) => { error!("Error {} on {} find attempt", e, maker_coin_ticker); return; - }, + } }; }; @@ -400,7 +400,7 @@ pub(super) async fn swap_kickstart_handler< maker_coin_ticker, taker_coin_ticker ); return; - }, + } }; let recreate_context = SwapRecreateCtx { maker_coin, taker_coin }; @@ -410,7 +410,7 @@ pub(super) async fn swap_kickstart_handler< Err(e) => { error!("Error {} on trying to recreate the swap {}", e, uuid); return; - }, + } }; if let Err(e) = state_machine.kickstart(state).await { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 0bcd20d689..d0e98b0377 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -4,12 +4,7 @@ use super::pubkey_banning::ban_pubkey_on_failed_swap; use super::swap_lock::{SwapLock, SwapLockOps}; use super::swap_watcher::{watcher_topic, SwapWatcherMsg}; use super::trade_preimage::{TradePreimageRequest, TradePreimageRpcError, TradePreimageRpcResult}; -use super::{broadcast_my_swap_status, broadcast_swap_message, broadcast_swap_msg_every, - check_other_coin_balance_for_swap, dex_fee_amount_from_taker_coin, dex_fee_rate, get_locked_amount, - recv_swap_msg, swap_topic, wait_for_maker_payment_conf_until, AtomicSwap, LockedAmount, MySwapInfo, - NegotiationDataMsg, NegotiationDataV2, NegotiationDataV3, RecoveredSwap, RecoveredSwapAction, SavedSwap, - SavedSwapIo, SavedTradeFee, SwapConfirmationsSettings, SwapError, SwapMsg, SwapPubkeys, SwapTxDataMsg, - SwapsContext, TransactionIdentifier, WAIT_CONFIRM_INTERVAL_SEC}; +use super::{broadcast_my_swap_status, broadcast_swap_message, broadcast_swap_msg_every, check_other_coin_balance_for_swap, dex_fee_amount_from_taker_coin, dex_fee_rate, get_locked_amount, recv_swap_msg, swap_topic, wait_for_maker_payment_conf_until, AtomicSwap, LockedAmount, MySwapInfo, NegotiationDataMsg, NegotiationDataV2, NegotiationDataV3, RecoveredSwap, RecoveredSwapAction, SavedSwap, SavedSwapIo, SavedTradeFee, SwapConfirmationsSettings, SwapError, SwapMsg, SwapPubkeys, SwapTxDataMsg, SwapsContext, TransactionIdentifier, WAIT_CONFIRM_INTERVAL_SEC}; use crate::mm2::lp_network::subscribe_to_topic; use crate::mm2::lp_ordermatch::TakerOrderBuilder; use crate::mm2::lp_swap::swap_v2_common::mark_swap_as_finished; @@ -186,7 +181,7 @@ impl TakerSavedEvent { TakerSwapEvent::MakerPaymentSpendFailed(_) => Some(TakerSwapCommand::PrepareForTakerPaymentRefund), TakerSwapEvent::TakerPaymentWaitRefundStarted { .. 
} => { Some(TakerSwapCommand::PrepareForTakerPaymentRefund) - }, + } TakerSwapEvent::TakerPaymentRefundStarted => Some(TakerSwapCommand::RefundTakerPayment), TakerSwapEvent::TakerPaymentRefunded(_) => Some(TakerSwapCommand::FinalizeTakerPaymentRefund), TakerSwapEvent::TakerPaymentRefundFailed(_) => Some(TakerSwapCommand::Finish), @@ -274,7 +269,7 @@ impl TakerSavedSwap { | TakerSwapEvent::MakerPaymentSpentByWatcher(_) | TakerSwapEvent::MakerPaymentWaitConfirmFailed(_) => { return false; - }, + } _ => (), } } @@ -344,7 +339,7 @@ impl TakerSavedSwap { return ERR!("maker's pubkey is empty"); }; key.to_string() - }, + } _ => return ERR!("Swap must be negotiated to get maker's pubkey"), }, None => return ERR!("Can't get maker's pubkey while there's no Negotiated event"), @@ -380,6 +375,13 @@ impl RunTakerSwapInput { RunTakerSwapInput::KickStart { swap_uuid, .. } => swap_uuid, } } + + fn db_id(&self) -> Option { + match self { + RunTakerSwapInput::StartNew(swap) => swap.db_id(), + RunTakerSwapInput::KickStart { taker_coin, .. } => taker_coin.account_db_id(), + } + } } /// Starts the taker swap and drives it to completion (until None next command received). @@ -388,9 +390,10 @@ impl RunTakerSwapInput { /// Every produced event is saved to local DB. Swap status is broadcast to P2P network after completion. pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let uuid = swap.uuid().to_owned(); + let db_id = swap.db_id().to_owned(); let mut attempts = 0; let swap_lock = loop { - match SwapLock::lock(&ctx, uuid, 40.).await { + match SwapLock::lock(&ctx, uuid, 40., db_id.as_deref()).await { Ok(Some(l)) => break l, Ok(None) => { if attempts >= 1 { @@ -403,11 +406,11 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { attempts += 1; Timer::sleep(40.).await; } - }, + } Err(e) => { error!("Swap {} file lock error: {}", uuid, e); return; - }, + } } }; @@ -422,16 +425,16 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { Some(c) => { info!("Swap {} kick started.", uuid); (swap, c) - }, + } None => { warn!("Swap {} has been finished already, aborting.", uuid); return; - }, + } }, Err(e) => { error!("Error loading swap {}: {}", uuid, e); return; - }, + } }, }; @@ -445,7 +448,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { Timer::sleep(30.).await; } } - .fuse(), + .fuse(), ); let ctx = swap.ctx.clone(); @@ -494,7 +497,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { match res.0 { Some(c) => { command = c; - }, + } None => { if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid, running_swap.db_id().as_deref()).await @@ -510,16 +513,17 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { } } break; - }, + } } } } - .fuse(), + .fuse(), ); select! 
{ _swap = swap_fut => (), // swap finished normally _touch = touch_loop => unreachable!("Touch loop can not stop!"), - }; + } + ; } #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] @@ -706,14 +710,14 @@ impl TakerSwapEvent { TakerSwapEvent::MakerPaymentValidateFailed(_) => "Maker payment validate failed...".to_owned(), TakerSwapEvent::MakerPaymentWaitConfirmFailed(_) => { "Maker payment wait for confirmation failed...".to_owned() - }, + } TakerSwapEvent::TakerPaymentSent(_) => "Taker payment sent...".to_owned(), TakerSwapEvent::WatcherMessageSent(_, _) => WATCHER_MESSAGE_SENT_LOG.to_owned(), TakerSwapEvent::TakerPaymentTransactionFailed(_) => "Taker payment transaction failed...".to_owned(), TakerSwapEvent::TakerPaymentDataSendFailed(_) => "Taker payment data send failed...".to_owned(), TakerSwapEvent::TakerPaymentWaitConfirmFailed(_) => { "Taker payment wait for confirmation failed...".to_owned() - }, + } TakerSwapEvent::TakerPaymentSpent(_) => "Taker payment spent...".to_owned(), TakerSwapEvent::TakerPaymentWaitForSpendFailed(_) => "Taker payment wait for spend failed...".to_owned(), TakerSwapEvent::MakerPaymentSpent(_) => "Maker payment spent...".to_owned(), @@ -721,7 +725,7 @@ impl TakerSwapEvent { TakerSwapEvent::MakerPaymentSpendFailed(_) => "Maker payment spend failed...".to_owned(), TakerSwapEvent::TakerPaymentWaitRefundStarted { wait_until } => { format!("Taker payment wait refund till {} started...", wait_until) - }, + } TakerSwapEvent::TakerPaymentRefundStarted => "Taker payment refund started...".to_owned(), TakerSwapEvent::TakerPaymentRefunded(_) => "Taker payment refunded...".to_owned(), TakerSwapEvent::TakerPaymentRefundFailed(_) => "Taker payment refund failed...".to_owned(), @@ -815,7 +819,7 @@ impl TakerSwap { fmt = "Taker swap {} has successfully started", self.uuid ); - }, + } TakerSwapEvent::StartFailed(err) => self.errors.lock().push(err), TakerSwapEvent::Negotiated(data) => { self.maker_payment_lock @@ -831,32 +835,32 @@ impl TakerSwap { if data.taker_coin_swap_contract_addr.is_some() { self.w().data.taker_coin_swap_contract_address = data.taker_coin_swap_contract_addr; } - }, + } TakerSwapEvent::NegotiateFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerFeeSent(tx) => self.w().taker_fee = Some(tx), TakerSwapEvent::TakerFeeSendFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentInstructionsReceived(instructions) => { self.w().payment_instructions = instructions - }, + } TakerSwapEvent::MakerPaymentReceived(tx) => self.w().maker_payment = Some(tx), TakerSwapEvent::MakerPaymentWaitConfirmStarted => (), TakerSwapEvent::MakerPaymentValidatedAndConfirmed => { self.maker_payment_confirmed.store(true, Ordering::Relaxed) - }, + } TakerSwapEvent::MakerPaymentValidateFailed(err) => self.errors.lock().push(err), TakerSwapEvent::MakerPaymentWaitConfirmFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentSent(tx) => self.w().taker_payment = Some(tx), TakerSwapEvent::WatcherMessageSent(maker_payment_spend_preimage, taker_payment_refund_preimage) => { self.w().maker_payment_spend_preimage = maker_payment_spend_preimage; self.w().taker_payment_refund_preimage = taker_payment_refund_preimage; - }, + } TakerSwapEvent::TakerPaymentTransactionFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentDataSendFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentWaitConfirmFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentSpent(data) => { 
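// [Editorial illustration, not part of the patch] Both run_maker_swap and run_taker_swap
// pair the swap future with a "touch" loop that refreshes the reentrancy lock every 30 s
// and select! between them; the touch loop never completes on its own. A minimal
// tokio-based analogue of that pattern (the project uses its own Timer and
// futures::select!; this is an assumption-level sketch, not the project's code):
use std::time::Duration;

async fn run_with_keepalive<F: std::future::Future<Output = ()>>(swap: F, touch: impl Fn()) {
    let keepalive = async {
        loop {
            touch(); // e.g. bump the lock file / IndexedDB timestamp
            tokio::time::sleep(Duration::from_secs(30)).await;
        }
    };
    tokio::select! {
        _ = swap => (),                                              // swap finished normally
        _ = keepalive => unreachable!("keep-alive loop cannot stop"),
    }
}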
self.w().taker_payment_spend = Some(data.transaction); self.w().secret = data.secret; - }, + } TakerSwapEvent::TakerPaymentWaitForSpendFailed(err) => self.errors.lock().push(err), TakerSwapEvent::MakerPaymentSpent(tx) => self.w().maker_payment_spend = Some(tx), TakerSwapEvent::MakerPaymentSpentByWatcher(tx) => self.w().maker_payment_spend = Some(tx), @@ -1028,8 +1032,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!taker_coin.get_fee_to_send_taker_fee {}", e).into(), - )])) - }, + )])); + } }; let get_sender_trade_fee_fut = self.taker_coin.get_sender_trade_fee(preimage_value, stage); let taker_payment_trade_fee = match get_sender_trade_fee_fut.await { @@ -1037,8 +1041,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!taker_coin.get_sender_trade_fee {}", e).into(), - )])) - }, + )])); + } }; let maker_payment_spend_trade_fee_fut = self.maker_coin.get_receiver_trade_fee(stage); let maker_payment_spend_trade_fee = match maker_payment_spend_trade_fee_fut.compat().await { @@ -1046,8 +1050,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!maker_coin.get_receiver_trade_fee {}", e).into(), - )])) - }, + )])); + } }; let params = TakerSwapPreparedParams { @@ -1078,8 +1082,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!maker_coin.current_block {}", e).into(), - )])) - }, + )])); + } }; let taker_coin_start_block = match self.taker_coin.current_block().compat().await { @@ -1087,8 +1091,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!taker_coin.current_block {}", e).into(), - )])) - }, + )])); + } }; let maker_coin_swap_contract_address = self.maker_coin.swap_contract_address(); @@ -1147,8 +1151,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("{:?}", e).into(), - )])) - }, + )])); + } }; debug!("Received maker negotiation data {:?}", maker_data); @@ -1169,7 +1173,7 @@ impl TakerSwap { maker_data.payment_locktime(), expected_lock_time ) - .into(), + .into(), )])); } @@ -1184,8 +1188,8 @@ impl TakerSwap { None => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("!maker_coin.negotiate_swap_contract_addr {}", e).into(), - )])) - }, + )])); + } }, }; @@ -1200,8 +1204,8 @@ impl TakerSwap { None => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("!taker_coin.negotiate_swap_contract_addr {}", e).into(), - )])) - }, + )])); + } }, }; @@ -1258,8 +1262,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("{:?}", e).into(), - )])) - }, + )])); + } }; drop(send_abort_handle); @@ -1306,7 +1310,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerFeeSendFailed(ERRL!("{}", err.get_plain_text_format()).into()), ])); - }, + } }; let tx_hash = transaction.tx_hash(); @@ -1329,8 +1333,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::MakerPaymentValidateFailed(e.to_string().into()), - ])) - }, + ])); + } }; let msg = SwapMsg::TakerFee(payment_data_msg); @@ -1355,8 +1359,8 @@ impl TakerSwap { TakerSwapEvent::MakerPaymentValidateFailed( ERRL!("Error waiting for 
'maker-payment' data: {}", e).into(), ), - ])) - }, + ])); + } }; drop(abort_send_handle); @@ -1374,10 +1378,10 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::MakerPaymentValidateFailed(e.to_string().into()), - ])) - }, + ])); + } } - }, + } None => None, }; swap_events.push(TakerSwapEvent::TakerPaymentInstructionsReceived(instructions)); @@ -1389,8 +1393,8 @@ impl TakerSwap { TakerSwapEvent::MakerPaymentValidateFailed( ERRL!("Error parsing the 'maker-payment': {:?}", e).into(), ), - ])) - }, + ])); + } }; let tx_hash = maker_payment.tx_hash(); @@ -1439,8 +1443,8 @@ impl TakerSwap { Err(err) => { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentTransactionFailed(err.into_inner().to_string().into()), - ])) - }, + ])); + } } } else { None @@ -1546,8 +1550,8 @@ impl TakerSwap { TakerSwapEvent::TakerPaymentTransactionFailed( ERRL!("Watcher reward error: {}", err.to_string()).into(), ), - ])) - }, + ])); + } } } else { None @@ -1582,15 +1586,15 @@ impl TakerSwap { ERRL!("{}", err.get_plain_text_format()).into(), ), ])); - }, + } } - }, + } }, Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentTransactionFailed(ERRL!("{}", e).into()), - ])) - }, + ])); + } }; let tx_hash = transaction.tx_hash(); @@ -1653,7 +1657,7 @@ impl TakerSwap { Some(taker_payment_refund.tx_hex()), )); info!("{}", WATCHER_MESSAGE_SENT_LOG); - }, + } Err(e) => error!( "The watcher message could not be sent, error creating at least one of the preimages: {}", e.get_plain_text_format() @@ -1758,7 +1762,7 @@ impl TakerSwap { wait_until: self.wait_refund_until(), }, ])); - }, + } }; drop(send_abort_handle); drop(watcher_broadcast_abort_handle); @@ -1780,8 +1784,8 @@ impl TakerSwap { Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentWaitForSpendFailed(ERRL!("{}", e).into()), - ])) - }, + ])); + } }; Ok((Some(TakerSwapCommand::SpendMakerPayment), vec![ @@ -1833,7 +1837,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::MakerPaymentSpendFailed(ERRL!("{}", err.get_plain_text_format()).into()), ])); - }, + } }; broadcast_p2p_tx_msg( @@ -1900,7 +1904,7 @@ impl TakerSwap { Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - }, + } } } @@ -1938,7 +1942,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentRefundFailed(ERRL!("{:?}", err.get_plain_text_format()).into()), ])); - }, + } }; broadcast_p2p_tx_msg( @@ -2030,7 +2034,7 @@ impl TakerSwap { }; #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); let swap = TakerSwap::new( ctx.clone(), @@ -2046,7 +2050,7 @@ impl TakerSwap { data.lock_duration, data.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); for saved_event in &saved.events { @@ -2121,15 +2125,15 @@ impl TakerSwap { "Maker payment was already spent by {} tx {:02x}", self.maker_coin.ticker(), tx.tx_hash() - ) - }, + ); + } Ok(Some(FoundSwapTxSpend::Refunded(tx))) => { return ERR!( "Maker payment was already refunded by {} tx {:02x}", self.maker_coin.ticker(), tx.tx_hash() - ) - }, + ); + } Err(e) => return ERR!("Error {} when trying to find maker payment spend", e), Ok(None) => (), // payment is not spent, continue } @@ -2161,7 
+2165,7 @@ impl TakerSwap { Some(tx) => tx.tx_hex(), None => return ERR!("Taker payment is not found, swap is not recoverable"), } - }, + } }; if self.r().taker_payment_spend.is_some() { @@ -2200,7 +2204,7 @@ impl TakerSwap { } return ERR!("{}", err.get_plain_text_format()); - }, + } }; return Ok(RecoveredSwap { @@ -2262,7 +2266,7 @@ impl TakerSwap { } return ERR!("{}", err.get_plain_text_format()); - }, + } }; Ok(RecoveredSwap { @@ -2270,7 +2274,7 @@ impl TakerSwap { coin: self.maker_coin.ticker().to_string(), transaction, }) - }, + } FoundSwapTxSpend::Refunded(tx) => ERR!( "Taker payment has been refunded already by transaction {:02x}", tx.tx_hash() @@ -2311,7 +2315,7 @@ impl TakerSwap { } return ERR!("{:?}", err.get_plain_text_format()); - }, + } }; Ok(RecoveredSwap { @@ -2319,7 +2323,7 @@ impl TakerSwap { coin: self.taker_coin.ticker().to_string(), transaction, }) - }, + } } } } @@ -2419,7 +2423,7 @@ pub async fn check_balance_for_taker_swap( taker_payment_trade_fee, maker_payment_spend_trade_fee, } - }, + } }; let taker_fee = TakerFeeAdditionalInfo { @@ -2435,7 +2439,7 @@ pub async fn check_balance_for_taker_swap( params.taker_payment_trade_fee, Some(taker_fee), ) - .await?; + .await?; if !params.maker_payment_spend_trade_fee.paid_from_trading_vol { check_other_coin_balance_for_swap(ctx, other_coin, swap_uuid, params.maker_payment_spend_trade_fee).await?; } @@ -2524,7 +2528,7 @@ pub async fn taker_swap_trade_preimage( Some(prepared_params), stage, ) - .await?; + .await?; let conf_settings = OrderConfirmationsSettings { base_confs: base_coin.required_confirmations(), @@ -2576,10 +2580,10 @@ pub async fn max_taker_vol(ctx: MmArc, req: Json) -> Result>, S Err(e) if e.get_inner().not_sufficient_balance() => { warn!("{}", e); MmNumber::from(0) - }, + } Err(err) => { return ERR!("{}", err); - }, + } }; let res = try_s!(json::to_vec(&json!({ @@ -2750,7 +2754,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::SpentOtherPayment, @@ -2798,7 +2802,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::RefundedMyPayment, @@ -2851,7 +2855,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::SpentOtherPayment, @@ -2895,7 +2899,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::RefundedMyPayment, @@ -2932,7 +2936,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let error = block_on(taker_swap.recover_funds()).unwrap_err(); assert!(error.contains("Too early to refund")); assert!(unsafe { SEARCH_TX_SPEND_CALLED }); @@ -2972,7 +2976,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::SpentOtherPayment, @@ -3002,7 +3006,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); assert!(block_on(taker_swap.recover_funds()).is_err()); } @@ -3043,7 +3047,7 @@ mod taker_swap_tests { 
taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); assert_eq!(unsafe { SWAP_CONTRACT_ADDRESS_CALLED }, 2); assert_eq!( @@ -3078,7 +3082,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); assert_eq!(unsafe { SWAP_CONTRACT_ADDRESS_CALLED }, 1); let expected_addr = addr_from_str(ETH_SEPOLIA_SWAP_CONTRACT).unwrap(); @@ -3247,7 +3251,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let swaps_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let arc = Arc::new(swap); let weak_ref = Arc::downgrade(&arc); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 8860cdf845..af98ccecc6 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -44,7 +44,8 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] use prost::Message; +#[allow(unused_imports)] +use prost::Message; /// Negotiation data representation to be stored in DB. #[derive(Clone, Debug, Deserialize, Serialize)] @@ -212,7 +213,7 @@ impl StateMachineStorage for TakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -257,7 +258,7 @@ impl StateMachineStorage for TakerSwapStorage { TakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -431,7 +432,7 @@ pub struct TakerSwapStateMachine - TakerSwapStateMachine +TakerSwapStateMachine { fn maker_payment_conf_timeout(&self) -> u64 { self.started_at + self.lock_duration / 3 } @@ -452,7 +453,7 @@ impl StorableStateMachine - for TakerSwapStateMachine +for TakerSwapStateMachine { type Storage = TakerSwapStorage; type Result = (); @@ -492,12 +493,12 @@ impl Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { TakerSwapEvent::Initialized { maker_coin_start_block, @@ -733,10 +734,10 @@ impl return MmError::err(SwapRecreateError::SwapCompleted), TakerSwapEvent::TakerFundingRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - }, + } TakerSwapEvent::TakerPaymentRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - }, + } }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -774,7 +775,7 @@ impl Result { - acquire_reentrancy_lock_impl(&self.ctx, self.uuid).await + acquire_reentrancy_lock_impl(&self.ctx, self.uuid, self.taker_coin.account_db_id().as_deref()).await } fn spawn_reentrancy_lock_renew(&mut self, guard: Self::ReentrancyLock) { @@ -824,14 +825,14 @@ impl { let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let ticker = self.taker_coin.ticker(); if let Some(taker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { taker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); }; - }, + } TakerSwapEvent::Negotiated { .. } | TakerSwapEvent::TakerFundingRefundRequired { .. } | TakerSwapEvent::MakerPaymentAndFundingSpendPreimgReceived { .. 
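// Illustrative sketch, not part of the patch above: the taker state machine now
// derives the per-account database id from the coin (`account_db_id()`) and threads
// it as `Option<&str>` into the reentrancy-lock and storage helpers. A simplified,
// self-contained version of that plumbing; the trait, the demo coin and the
// "DEFAULT" fallback name are assumptions made for this sketch.

trait AccountDbId {
    /// `None` means "use the shared/default database".
    fn account_db_id(&self) -> Option<String>;
}

struct DemoCoin {
    db_id: Option<String>,
}

impl AccountDbId for DemoCoin {
    fn account_db_id(&self) -> Option<String> { self.db_id.clone() }
}

/// Resolves the identifier the storage layer would key records and paths by.
fn effective_db_id(coin: &impl AccountDbId) -> String {
    coin.account_db_id().unwrap_or_else(|| "DEFAULT".to_string())
}

fn main() {
    let hd_account = DemoCoin { db_id: Some("account-rmd160-hex".to_string()) };
    let legacy = DemoCoin { db_id: None };
    println!("{}", effective_db_id(&hd_account));
    println!("{}", effective_db_id(&legacy));
}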
} @@ -871,7 +872,7 @@ impl Default for Initialize { } impl InitialState - for Initialize +for Initialize { type StateMachine = TakerSwapStateMachine; } #[async_trait] impl State - for Initialize +for Initialize { type StateMachine = TakerSwapStateMachine; @@ -921,7 +922,7 @@ impl { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -929,7 +930,7 @@ impl { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let total_payment_value = @@ -946,7 +947,7 @@ impl { let reason = AbortReason::FailedToGetTakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let maker_payment_spend_fee = match state_machine.maker_coin.get_receiver_trade_fee(stage).compat().await { @@ -954,7 +955,7 @@ impl { let reason = AbortReason::FailedToGetMakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let prepared_params = TakerSwapPreparedParams { @@ -977,7 +978,7 @@ impl { impl TransitionFrom> for Initialized {} impl StorableState - for Initialized +for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1024,7 +1025,7 @@ impl State - for Initialized +for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1041,7 +1042,7 @@ impl { let reason = AbortReason::DidNotReceiveMakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; debug!("Received maker negotiation message {:?}", maker_negotiation); @@ -1071,7 +1072,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_htlc_pub_from_maker = match state_machine @@ -1082,7 +1083,7 @@ impl { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let taker_coin_maker_address = match state_machine @@ -1093,7 +1094,7 @@ impl { let reason = AbortReason::FailedToParseAddress(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let unique_data = state_machine.unique_data(); @@ -1134,7 +1135,7 @@ impl { let reason = AbortReason::DidNotReceiveMakerNegotiated(e); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; drop(abort_handle); @@ -1218,13 +1219,12 @@ struct Negotiated TransitionFrom> - for Negotiated -{ -} +for Negotiated +{} #[async_trait] impl State - for Negotiated +for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1244,7 +1244,7 @@ impl { let reason = AbortReason::FailedToSendTakerFunding(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; info!( @@ -1265,7 +1265,7 @@ impl StorableState - for Negotiated +for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1289,7 +1289,7 @@ struct TakerFundingSent State - for TakerFundingSent +for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1329,7 +1329,7 @@ impl TransitionFrom> - for TakerFundingSent -{ -} +for TakerFundingSent +{} impl StorableState - for TakerFundingSent +for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1431,13 +1430,12 @@ struct MakerPaymentAndFundingSpendPreimgReceived - 
TransitionFrom> - for MakerPaymentAndFundingSpendPreimgReceived -{ -} +TransitionFrom> +for MakerPaymentAndFundingSpendPreimgReceived +{} impl StorableState - for MakerPaymentAndFundingSpendPreimgReceived +for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1464,7 +1462,7 @@ impl State - for MakerPaymentAndFundingSpendPreimgReceived +for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1574,7 +1572,7 @@ impl - TransitionFrom> for TakerPaymentSent -{ -} +TransitionFrom> for TakerPaymentSent +{} impl - TransitionFrom> - for TakerPaymentSent -{ -} +TransitionFrom> +for TakerPaymentSent +{} #[async_trait] impl State - for TakerPaymentSent +for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1669,7 +1665,7 @@ impl StorableState - for TakerPaymentSent +for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1771,24 +1767,21 @@ struct TakerFundingRefundRequired - TransitionFrom> for TakerFundingRefundRequired -{ -} +TransitionFrom> for TakerFundingRefundRequired +{} impl - TransitionFrom> - for TakerFundingRefundRequired -{ -} +TransitionFrom> +for TakerFundingRefundRequired +{} impl - TransitionFrom> for TakerFundingRefundRequired -{ -} +TransitionFrom> for TakerFundingRefundRequired +{} #[async_trait] impl State - for TakerFundingRefundRequired +for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1817,7 +1810,7 @@ impl { let reason = AbortReason::TakerFundingRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = TakerFundingRefunded { @@ -1831,7 +1824,7 @@ impl StorableState - for TakerFundingRefundRequired +for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1863,18 +1856,16 @@ struct TakerPaymentRefundRequired - TransitionFrom> for TakerPaymentRefundRequired -{ -} +TransitionFrom> for TakerPaymentRefundRequired +{} impl - TransitionFrom> for TakerPaymentRefundRequired -{ -} +TransitionFrom> for TakerPaymentRefundRequired +{} #[async_trait] impl State - for TakerPaymentRefundRequired +for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1896,7 +1887,7 @@ impl { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - }, + } } } @@ -1921,7 +1912,7 @@ impl { let reason = AbortReason::TakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let next_state = TakerPaymentRefunded { @@ -1938,7 +1929,7 @@ impl StorableState - for TakerPaymentRefundRequired +for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1964,14 +1955,13 @@ struct MakerPaymentConfirmed - TransitionFrom> - for MakerPaymentConfirmed -{ -} +TransitionFrom> +for MakerPaymentConfirmed +{} #[async_trait] impl State - for MakerPaymentConfirmed +for MakerPaymentConfirmed { type StateMachine = TakerSwapStateMachine; @@ -2003,7 +1993,7 @@ impl StorableState - for MakerPaymentConfirmed +for MakerPaymentConfirmed { type StateMachine = TakerSwapStateMachine; @@ -2060,13 +2050,12 @@ struct TakerPaymentSpent - TransitionFrom> for TakerPaymentSpent -{ -} +TransitionFrom> for TakerPaymentSpent +{} #[async_trait] impl State - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2086,7 +2075,7 @@ impl { let reason = AbortReason::CouldNotExtractSecret(e); return 
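// Illustrative sketch, not part of the patch above: the refund paths sit in a loop
// that keeps asking the coin whether the HTLC is already refundable and sleeps
// between attempts when the check errors out. A synchronous, std-only approximation
// of that polling shape; the real code is async and uses the crate's Timer, and
// everything except the 30-second retry interval is invented for this sketch.

use std::thread::sleep;
use std::time::Duration;

enum CanRefund {
    Yes,
    NotYet { wait_secs: u64 },
}

fn wait_until_refundable(mut check: impl FnMut() -> Result<CanRefund, String>) {
    loop {
        match check() {
            Ok(CanRefund::Yes) => break,
            Ok(CanRefund::NotYet { wait_secs }) => sleep(Duration::from_secs(wait_secs.min(30))),
            Err(e) => {
                eprintln!("Error {} on can_refund check, retrying in 30 seconds", e);
                sleep(Duration::from_secs(30));
            },
        }
    }
}

fn main() {
    let mut calls = 0;
    wait_until_refundable(|| {
        calls += 1;
        if calls < 3 { Ok(CanRefund::NotYet { wait_secs: 0 }) } else { Ok(CanRefund::Yes) }
    });
    println!("refund path unlocked after {} checks", calls);
}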
Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; let args = SpendMakerPaymentArgs { @@ -2103,7 +2092,7 @@ impl { let reason = AbortReason::FailedToSpendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - }, + } }; info!( "Spent maker payment {} tx {:02x} during swap {}", @@ -2124,7 +2113,7 @@ impl StorableState - for TakerPaymentSpent +for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2159,12 +2148,11 @@ struct MakerPaymentSpent - TransitionFrom> for MakerPaymentSpent -{ -} +TransitionFrom> for MakerPaymentSpent +{} impl StorableState - for MakerPaymentSpent +for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2194,7 +2182,7 @@ impl State - for MakerPaymentSpent +for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2244,7 +2232,7 @@ impl Aborted { #[async_trait] impl LastState - for Aborted +for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2257,7 +2245,7 @@ impl StorableState - for Aborted +for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2273,24 +2261,20 @@ impl TransitionFrom> for impl TransitionFrom> for Aborted {} impl TransitionFrom> - for Aborted -{ -} +for Aborted +{} impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} impl - TransitionFrom> for Aborted -{ -} +TransitionFrom> for Aborted +{} struct Completed { maker_coin: PhantomData, @@ -2307,7 +2291,7 @@ impl Completed { } impl StorableState - for Completed +for Completed { type StateMachine = TakerSwapStateMachine; @@ -2316,7 +2300,7 @@ impl LastState - for Completed +for Completed { type StateMachine = TakerSwapStateMachine; @@ -2329,9 +2313,8 @@ impl - TransitionFrom> for Completed -{ -} +TransitionFrom> for Completed +{} struct TakerFundingRefunded { maker_coin: PhantomData, @@ -2341,7 +2324,7 @@ struct TakerFundingRefunded StorableState - for TakerFundingRefunded +for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2362,7 +2345,7 @@ impl LastState - for TakerFundingRefunded +for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2378,9 +2361,8 @@ impl - TransitionFrom> for TakerFundingRefunded -{ -} +TransitionFrom> for TakerFundingRefunded +{} struct TakerPaymentRefunded { maker_coin: PhantomData, @@ -2390,7 +2372,7 @@ struct TakerPaymentRefunded StorableState - for TakerPaymentRefunded +for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2408,7 +2390,7 @@ impl LastState - for TakerPaymentRefunded +for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2424,6 +2406,5 @@ impl - TransitionFrom> for TakerPaymentRefunded -{ -} +TransitionFrom> for TakerPaymentRefunded +{} From 92b94aceed68cd64234dad9703220eafc21aef2e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 3 May 2024 12:50:48 +0100 Subject: [PATCH 091/186] cargo fmt --- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 119 +++++---- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 187 +++++++------- mm2src/mm2_main/src/lp_swap/swap_lock.rs | 19 +- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 34 +-- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 156 ++++++------ mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 241 ++++++++++-------- 6 files changed, 398 insertions(+), 358 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 19232bc111..0fc29b995d 100644 --- 
a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -294,7 +294,7 @@ impl MakerSwap { fmt = "Maker swap {} has successfully started", self.uuid ); - } + }, MakerSwapEvent::StartFailed(err) => self.errors.lock().push(err), MakerSwapEvent::Negotiated(data) => { self.taker_payment_lock @@ -308,11 +308,11 @@ impl MakerSwap { if data.taker_coin_swap_contract_addr.is_some() { self.w().data.taker_coin_swap_contract_address = data.taker_coin_swap_contract_addr; } - } + }, MakerSwapEvent::NegotiateFailed(err) => self.errors.lock().push(err), MakerSwapEvent::MakerPaymentInstructionsReceived(instructions) => { self.w().payment_instructions = instructions - } + }, MakerSwapEvent::TakerFeeValidated(tx) => self.w().taker_fee = Some(tx), MakerSwapEvent::TakerFeeValidateFailed(err) => self.errors.lock().push(err), MakerSwapEvent::MakerPaymentSent(tx) => self.w().maker_payment = Some(tx), @@ -323,7 +323,7 @@ impl MakerSwap { MakerSwapEvent::TakerPaymentWaitConfirmStarted => (), MakerSwapEvent::TakerPaymentValidatedAndConfirmed => { self.taker_payment_confirmed.store(true, Ordering::Relaxed) - } + }, MakerSwapEvent::TakerPaymentValidateFailed(err) => self.errors.lock().push(err), MakerSwapEvent::TakerPaymentWaitConfirmFailed(err) => self.errors.lock().push(err), MakerSwapEvent::TakerPaymentSpent(tx) => self.w().taker_payment_spend = Some(tx), @@ -489,7 +489,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!maker_coin.get_sender_trade_fee {}", e).into(), )])); - } + }, }; let taker_payment_spend_trade_fee_fut = self.taker_coin.get_receiver_trade_fee(stage); let taker_payment_spend_trade_fee = match taker_payment_spend_trade_fee_fut.compat().await { @@ -498,7 +498,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!taker_coin.get_receiver_trade_fee {}", e).into(), )])); - } + }, }; let params = MakerSwapPreparedParams { @@ -514,14 +514,14 @@ impl MakerSwap { Some(params), stage, ) - .await + .await { Ok(_) => (), Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!check_balance_for_maker_swap {}", e).into(), )])); - } + }, }; let started_at = now_sec(); @@ -531,7 +531,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!maker_coin.current_block {}", e).into(), )])); - } + }, }; let taker_coin_start_block = match self.taker_coin.current_block().compat().await { @@ -540,7 +540,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::StartFailed( ERRL!("!taker_coin.current_block {}", e).into(), )])); - } + }, }; let maker_coin_swap_contract_address = self.maker_coin.swap_contract_address(); @@ -613,7 +613,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::NegotiateFailed( ERRL!("{:?}", e).into(), )])); - } + }, }; drop(send_abort_handle); let time_dif = self.r().data.started_at.abs_diff(taker_data.started_at()); @@ -633,7 +633,7 @@ impl MakerSwap { taker_data.payment_locktime(), expected_lock_time ) - .into(), + .into(), )])); } @@ -647,7 +647,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::NegotiateFailed( ERRL!("!maker_coin.negotiate_swap_contract_addr {}", e).into(), )])); - } + }, }; let taker_coin_swap_contract_addr = match self @@ -660,7 +660,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![MakerSwapEvent::NegotiateFailed( 
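// Illustrative sketch, not part of the patch above: during negotiation the maker
// rejects the taker's data when the reported start times drift too far apart or
// when the taker's payment locktime differs from the expected one. A minimal
// stand-alone version of those two checks; the threshold constant and error
// strings are invented here, only the general shape follows the hunks above.

fn validate_negotiation(
    my_started_at: u64,
    taker_started_at: u64,
    taker_payment_locktime: u64,
    expected_lock_time: u64,
) -> Result<(), String> {
    const MAX_STARTED_AT_DIFF: u64 = 60; // assumption for the sketch
    if my_started_at.abs_diff(taker_started_at) > MAX_STARTED_AT_DIFF {
        return Err("started_at values diverge too much".to_string());
    }
    if taker_payment_locktime != expected_lock_time {
        return Err(format!(
            "taker locktime {} != expected {}",
            taker_payment_locktime, expected_lock_time
        ));
    }
    Ok(())
}

fn main() {
    assert!(validate_negotiation(1_000, 1_010, 7_777, 7_777).is_ok());
    assert!(validate_negotiation(1_000, 1_010, 7_777, 8_888).is_err());
}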
ERRL!("!taker_coin.negotiate_swap_contract_addr {}", e).into(), )])); - } + }, }; // Validate maker_coin_htlc_pubkey realness @@ -716,7 +716,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::TakerFeeValidateFailed(ERRL!("{}", e).into()), ])); - } + }, }; drop(send_abort_handle); @@ -738,9 +738,9 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::TakerFeeValidateFailed(e.to_string().into()), ])); - } + }, } - } + }, None => None, }; swap_events.push(MakerSwapEvent::MakerPaymentInstructionsReceived(instructions)); @@ -751,7 +751,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::TakerFeeValidateFailed(ERRL!("{:?}", e).into()), ])); - } + }, }; let hash = taker_fee.tx_hash(); @@ -787,7 +787,7 @@ impl MakerSwap { attempts += 1; Timer::sleep(10.).await; } - } + }, }; } @@ -838,7 +838,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::MakerPaymentTransactionFailed(err.into_inner().to_string().into()), ])); - } + }, } } else { None @@ -869,15 +869,15 @@ impl MakerSwap { ERRL!("{}", err.get_plain_text_format()).into(), ), ])); - } + }, } - } + }, }, Err(e) => { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::MakerPaymentTransactionFailed(ERRL!("{}", e).into()), ])); - } + }, }; let tx_hash = transaction.tx_hash(); @@ -904,7 +904,7 @@ impl MakerSwap { wait_until: self.wait_refund_until(), }, ])); - } + }, }; let msg = SwapMsg::MakerPayment(payment_data_msg); let abort_send_handle = broadcast_swap_msg_every( @@ -955,7 +955,7 @@ impl MakerSwap { wait_until: self.wait_refund_until(), }, ])); - } + }, }; drop(abort_send_handle); @@ -970,7 +970,7 @@ impl MakerSwap { wait_until: self.wait_refund_until(), }, ])); - } + }, }; let tx_hash = taker_payment.tx_hash(); @@ -1030,7 +1030,7 @@ impl MakerSwap { return Ok((Some(MakerSwapCommand::Finish), vec![ MakerSwapEvent::MakerPaymentTransactionFailed(err.into_inner().to_string().into()), ])); - } + }, } } else { None @@ -1118,13 +1118,13 @@ impl MakerSwap { "!taker_coin.send_maker_spends_taker_payment: {}", err.get_plain_text_format() ) - .into(), + .into(), ), MakerSwapEvent::MakerPaymentWaitRefundStarted { wait_until: self.wait_refund_until(), }, ])); - } + }, }; broadcast_p2p_tx_msg( @@ -1221,7 +1221,7 @@ impl MakerSwap { Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - } + }, } } @@ -1261,10 +1261,10 @@ impl MakerSwap { "!maker_coin.send_maker_refunds_payment: {}", err.get_plain_text_format() ) - .into(), + .into(), ), ])); - } + }, }; broadcast_p2p_tx_msg( @@ -1418,14 +1418,14 @@ impl MakerSwap { selfi.taker_coin.ticker(), tx.tx_hash() ); - } + }, Ok(Some(FoundSwapTxSpend::Refunded(tx))) => { return ERR!( "Taker payment was already refunded by {} tx {:02x}", selfi.taker_coin.ticker(), tx.tx_hash() ); - } + }, Err(e) => return ERR!("Error {} when trying to find taker payment spend", e), Ok(None) => (), // payment is not spent, continue } @@ -1493,7 +1493,7 @@ impl MakerSwap { Some(tx) => tx.tx_hex(), None => return ERR!("Maker payment transaction was not found"), } - } + }, }; let search_input = SearchForSwapTxSpendInput { @@ -1517,7 +1517,7 @@ impl MakerSwap { coin: self.taker_coin.ticker().to_string(), transaction, }) - } + }, Ok(Some(FoundSwapTxSpend::Refunded(tx))) => ERR!( "Maker payment was already refunded by {} tx {:02x}", self.maker_coin.ticker(), @@ -1558,7 +1558,7 @@ impl MakerSwap { } return ERR!("{}", err.get_plain_text_format()); - 
} + }, }; Ok(RecoveredSwap { @@ -1566,7 +1566,7 @@ impl MakerSwap { coin: self.maker_coin.ticker().to_string(), transaction, }) - } + }, } } } @@ -1675,14 +1675,14 @@ impl MakerSwapEvent { MakerSwapEvent::MakerPaymentDataSendFailed(_) => "Maker payment failed...".to_owned(), MakerSwapEvent::MakerPaymentWaitConfirmFailed(_) => { "Maker payment wait for confirmation failed...".to_owned() - } + }, MakerSwapEvent::TakerPaymentReceived(_) => "Taker payment received...".to_owned(), MakerSwapEvent::TakerPaymentWaitConfirmStarted => "Taker payment wait confirm started...".to_owned(), MakerSwapEvent::TakerPaymentValidatedAndConfirmed => "Taker payment validated and confirmed...".to_owned(), MakerSwapEvent::TakerPaymentValidateFailed(_) => "Taker payment validate failed...".to_owned(), MakerSwapEvent::TakerPaymentWaitConfirmFailed(_) => { "Taker payment wait for confirmation failed...".to_owned() - } + }, MakerSwapEvent::TakerPaymentSpent(_) => "Taker payment spent...".to_owned(), MakerSwapEvent::TakerPaymentSpendFailed(_) => "Taker payment spend failed...".to_owned(), MakerSwapEvent::TakerPaymentSpendConfirmStarted => "Taker payment send wait confirm started...".to_owned(), @@ -1690,7 +1690,7 @@ impl MakerSwapEvent { MakerSwapEvent::TakerPaymentSpendConfirmFailed(_) => "Taker payment spend confirm failed...".to_owned(), MakerSwapEvent::MakerPaymentWaitRefundStarted { wait_until } => { format!("Maker payment wait refund till {} started...", wait_until) - } + }, MakerSwapEvent::MakerPaymentRefundStarted => "Maker payment refund started...".to_owned(), MakerSwapEvent::MakerPaymentRefunded(_) => "Maker payment refunded...".to_owned(), MakerSwapEvent::MakerPaymentRefundFailed(_) => "Maker payment refund failed...".to_owned(), @@ -1760,7 +1760,7 @@ impl MakerSavedEvent { MakerSwapEvent::TakerPaymentSpendConfirmFailed(_) => Some(MakerSwapCommand::PrepareForMakerPaymentRefund), MakerSwapEvent::MakerPaymentWaitRefundStarted { .. 
} => { Some(MakerSwapCommand::PrepareForMakerPaymentRefund) - } + }, MakerSwapEvent::MakerPaymentRefundStarted => Some(MakerSwapCommand::RefundMakerPayment), MakerSwapEvent::MakerPaymentRefunded(_) => Some(MakerSwapCommand::FinalizeMakerPaymentRefund), MakerSwapEvent::MakerPaymentRefundFailed(_) => Some(MakerSwapCommand::Finish), @@ -1929,7 +1929,7 @@ impl MakerSavedSwap { | MakerSwapEvent::TakerPaymentSpendConfirmed | MakerSwapEvent::MakerPaymentRefunded(_) => { return false; - } + }, _ => (), } } @@ -1995,7 +1995,7 @@ impl MakerSavedSwap { return ERR!("taker's pubkey is empty"); }; key.to_string() - } + }, _ => return ERR!("Swap must be negotiated to get taker's pubkey"), }, None => return ERR!("Can't get taker's pubkey while there's no Negotiated event"), @@ -2062,11 +2062,11 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { attempts += 1; Timer::sleep(40.).await; } - } + }, Err(e) => { error!("Swap {} file lock error: {}", uuid, e); return; - } + }, }; }; @@ -2081,16 +2081,16 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { Some(c) => { info!("Swap {} kick started.", uuid); (swap, c) - } + }, None => { warn!("Swap {} has been finished already, aborting.", uuid); return; - } + }, }, Err(e) => { error!("Error loading swap {}: {}", uuid, e); return; - } + }, }, }; @@ -2104,7 +2104,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { Timer::sleep(30.).await; } } - .fuse(), + .fuse(), ); let ctx = swap.ctx.clone(); @@ -2165,7 +2165,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { match res.0 { Some(c) => { command = c; - } + }, None => { if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid, running_swap.db_id().as_deref()).await @@ -2180,17 +2180,16 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { } } break; - } + }, } } } - .fuse(), + .fuse(), ); select! { _swap = swap_fut => (), // swap finished normally _touch = touch_loop => unreachable!("Touch loop can not stop!"), - } - ; + }; } pub struct MakerSwapPreparedParams { @@ -2209,9 +2208,9 @@ pub async fn check_balance_for_maker_swap( ) -> CheckBalanceResult { let (maker_payment_trade_fee, taker_payment_spend_trade_fee) = match prepared_params { Some(MakerSwapPreparedParams { - maker_payment_trade_fee, - taker_payment_spend_trade_fee, - }) => (maker_payment_trade_fee, taker_payment_spend_trade_fee), + maker_payment_trade_fee, + taker_payment_spend_trade_fee, + }) => (maker_payment_trade_fee, taker_payment_spend_trade_fee), None => { let preimage_value = TradePreimageValue::Exact(volume.to_decimal()); let maker_payment_trade_fee = my_coin @@ -2224,7 +2223,7 @@ pub async fn check_balance_for_maker_swap( .await .mm_err(|e| CheckBalanceError::from_trade_preimage_error(e, other_coin.ticker()))?; (maker_payment_trade_fee, taker_payment_spend_trade_fee) - } + }, }; let balance = @@ -2296,7 +2295,7 @@ pub async fn maker_swap_trade_preimage( Some(prepared_params), FeeApproxStage::TradePreimage, ) - .await?; + .await?; } let conf_settings = OrderConfirmationsSettings { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index c9b75eb915..7155f6a8bf 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -43,8 +43,7 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] -use prost::Message; +#[allow(unused_imports)] use prost::Message; /// Negotiation data representation to be stored in DB. 
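// Illustrative sketch, not part of the patch above: check_balance_for_maker_swap
// either reuses fees that were already estimated (the prepared params) or computes
// them on the spot. The "use-it-or-derive-it" shape, reduced to plain numbers; the
// struct and fee values here are stand-ins, not the crate's types.

struct PreparedParams {
    my_payment_fee: u64,
    other_payment_spend_fee: u64,
}

fn fees_for_check(prepared: Option<PreparedParams>, estimate: impl Fn() -> (u64, u64)) -> (u64, u64) {
    match prepared {
        Some(PreparedParams {
            my_payment_fee,
            other_payment_spend_fee,
        }) => (my_payment_fee, other_payment_spend_fee),
        None => estimate(),
    }
}

fn main() {
    // Reuse fees estimated earlier (e.g. by a trade-preimage call)...
    let reused = fees_for_check(
        Some(PreparedParams { my_payment_fee: 1_000, other_payment_spend_fee: 500 }),
        || unreachable!("estimation is skipped when prepared params are given"),
    );
    // ...or fall back to a fresh estimation.
    let fresh = fees_for_check(None, || (1_100, 550));
    println!("{:?} {:?}", reused, fresh);
}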
#[derive(Clone, Debug, Deserialize, Serialize)] @@ -181,7 +180,7 @@ impl StateMachineStorage for MakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -226,7 +225,7 @@ impl StateMachineStorage for MakerSwapStorage { MakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -398,7 +397,7 @@ pub struct MakerSwapStateMachine -MakerSwapStateMachine + MakerSwapStateMachine { /// Timeout for taker payment's on-chain confirmation. #[inline] @@ -423,7 +422,7 @@ MakerSwapStateMachine #[async_trait] impl StorableStateMachine -for MakerSwapStateMachine + for MakerSwapStateMachine { type Storage = MakerSwapStorage; type Result = (); @@ -463,12 +462,12 @@ for MakerSwapStateMachine storage: MakerSwapStorage, mut repr: MakerSwapDbRepr, recreate_ctx: Self::RecreateCtx, - ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { MakerSwapEvent::Initialized { maker_coin_start_block, @@ -621,7 +620,7 @@ for MakerSwapStateMachine MakerSwapEvent::Completed => return MmError::err(SwapRecreateError::SwapCompleted), MakerSwapEvent::MakerPaymentRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - } + }, }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -709,14 +708,14 @@ for MakerSwapStateMachine .entry(maker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, MakerSwapEvent::MakerPaymentSentFundingSpendGenerated { .. } => { let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let ticker = self.maker_coin.ticker(); if let Some(maker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { maker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); }; - } + }, MakerSwapEvent::WaitingForTakerFunding { .. } | MakerSwapEvent::TakerFundingReceived { .. } | MakerSwapEvent::MakerPaymentRefundRequired { .. } @@ -762,7 +761,7 @@ for MakerSwapStateMachine .entry(maker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, MakerSwapEvent::MakerPaymentSentFundingSpendGenerated { .. } | MakerSwapEvent::MakerPaymentRefundRequired { .. } | MakerSwapEvent::MakerPaymentRefunded { .. 
} @@ -790,14 +789,14 @@ impl Default for Initialize { } impl InitialState -for Initialize + for Initialize { type StateMachine = MakerSwapStateMachine; } #[async_trait] impl State -for Initialize + for Initialize { type StateMachine = MakerSwapStateMachine; @@ -807,7 +806,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -815,7 +814,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let preimage_value = TradePreimageValue::Exact(state_machine.maker_volume.to_decimal()); @@ -829,7 +828,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_payment_spend_trade_fee = match state_machine.taker_coin.get_receiver_trade_fee(stage).compat().await @@ -838,7 +837,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let prepared_params = MakerSwapPreparedParams { @@ -855,7 +854,7 @@ for Initialize Some(prepared_params), FeeApproxStage::StartSwap, ) - .await + .await { let reason = AbortReason::BalanceCheckFailure(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; @@ -886,7 +885,7 @@ struct Initialized { impl TransitionFrom> for Initialized {} impl StorableState -for Initialized + for Initialized { type StateMachine = MakerSwapStateMachine; @@ -902,7 +901,7 @@ for Initialized #[async_trait] impl State -for Initialized + for Initialized { type StateMachine = MakerSwapStateMachine; @@ -943,7 +942,7 @@ for Initialized Err(e) => { let reason = AbortReason::DidNotReceiveTakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; drop(abort_handle); @@ -953,11 +952,11 @@ for Initialized Some(taker_negotiation::Action::Abort(abort)) => { let reason = AbortReason::TakerAbortedNegotiation(abort.reason); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, None => { let reason = AbortReason::ReceivedInvalidTakerNegotiation; return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let started_at_diff = state_machine.started_at.abs_diff(taker_data.started_at); @@ -984,7 +983,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let maker_coin_htlc_pub_from_taker = @@ -993,7 +992,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = WaitingForTakerFunding { @@ -1066,12 +1065,13 @@ struct WaitingForTakerFunding TransitionFrom> -for WaitingForTakerFunding -{} + for WaitingForTakerFunding +{ +} #[async_trait] impl State -for WaitingForTakerFunding + for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1104,7 +1104,7 @@ for WaitingForTakerFunding Err(e) => { let reason = AbortReason::DidNotReceiveTakerFundingInfo(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; 
drop(abort_handle); @@ -1114,7 +1114,7 @@ for WaitingForTakerFunding Err(e) => { let reason = AbortReason::FailedToParseTakerFunding(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = TakerFundingReceived { maker_coin_start_block: self.maker_coin_start_block, @@ -1128,7 +1128,7 @@ for WaitingForTakerFunding } impl StorableState -for WaitingForTakerFunding + for WaitingForTakerFunding { type StateMachine = MakerSwapStateMachine; @@ -1151,12 +1151,13 @@ struct TakerFundingReceived -TransitionFrom> for TakerFundingReceived -{} + TransitionFrom> for TakerFundingReceived +{ +} #[async_trait] impl State -for TakerFundingReceived + for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1197,7 +1198,7 @@ for TakerFundingReceived Err(e) => { let reason = AbortReason::FailedToGenerateFundingSpend(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let args = SendMakerPaymentArgs { @@ -1213,7 +1214,7 @@ for TakerFundingReceived Err(e) => { let reason = AbortReason::FailedToSendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; info!( "Sent maker payment {} tx {:02x} during swap {}", @@ -1235,7 +1236,7 @@ for TakerFundingReceived } impl StorableState -for TakerFundingReceived + for TakerFundingReceived { type StateMachine = MakerSwapStateMachine; @@ -1263,13 +1264,14 @@ struct MakerPaymentSentFundingSpendGenerated -TransitionFrom> -for MakerPaymentSentFundingSpendGenerated -{} + TransitionFrom> + for MakerPaymentSentFundingSpendGenerated +{ +} #[async_trait] impl State -for MakerPaymentSentFundingSpendGenerated + for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1326,7 +1328,7 @@ for MakerPaymentSentFundingSpendGenerated negotiation_data: self.negotiation_data, }; break Self::change_state(next_state, state_machine).await; - } + }, // it's not really possible as taker's funding time lock is 3 * lock_duration, though we have to // handle this case anyway Ok(Some(FundingTxSpend::RefundedTimelock(_))) => { @@ -1339,7 +1341,7 @@ for MakerPaymentSentFundingSpendGenerated }; break Self::change_state(next_state, state_machine).await; - } + }, Ok(Some(FundingTxSpend::RefundedSecret { secret, tx: _ })) => { let next_state = MakerPaymentRefundRequired { maker_coin_start_block: self.maker_coin_start_block, @@ -1350,15 +1352,15 @@ for MakerPaymentSentFundingSpendGenerated }; break Self::change_state(next_state, state_machine).await; - } + }, Ok(None) => { Timer::sleep(30.).await; - } + }, Err(e) => match e { SearchForFundingSpendErr::Rpc(e) => { error!("Rpc error {} on search_for_taker_funding_spend", e); Timer::sleep(30.).await; - } + }, // Other error cases are considered irrecoverable, so we should proceed to refund stage // handling using @ binding to trigger a compiler error when new variant is added e @ SearchForFundingSpendErr::InvalidInputTx(_) @@ -1373,7 +1375,7 @@ for MakerPaymentSentFundingSpendGenerated }; break Self::change_state(next_state, state_machine).await; - } + }, }, } } @@ -1381,7 +1383,7 @@ for MakerPaymentSentFundingSpendGenerated } impl StorableState -for MakerPaymentSentFundingSpendGenerated + for MakerPaymentSentFundingSpendGenerated { type StateMachine = MakerSwapStateMachine; @@ -1431,17 +1433,19 @@ struct MakerPaymentRefundRequired -TransitionFrom> -for MakerPaymentRefundRequired -{} + TransitionFrom> + for MakerPaymentRefundRequired +{ +} impl 
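// Illustrative sketch, not part of the patch above: after sending the maker payment,
// the maker loops on search_for_taker_funding_spend and branches on what it finds:
// the funding was moved into the taker payment (happy path), it was refunded via
// timelock or secret (refund path), nothing happened yet (sleep and retry), or the
// lookup failed (retry on RPC errors, bail to the refund path otherwise). The
// decision table below uses invented stand-in types.

enum FundingSpend {
    TransferredToTakerPayment,
    RefundedTimelock,
    RefundedSecret,
}

enum LookupErr {
    Rpc(String),
    InvalidInputTx(String),
}

enum NextStep {
    ProceedToTakerPayment,
    RefundMakerPayment(String),
    RetryAfterSecs(u64),
}

fn next_step(found: Result<Option<FundingSpend>, LookupErr>) -> NextStep {
    match found {
        Ok(Some(FundingSpend::TransferredToTakerPayment)) => NextStep::ProceedToTakerPayment,
        Ok(Some(FundingSpend::RefundedTimelock)) | Ok(Some(FundingSpend::RefundedSecret)) => {
            NextStep::RefundMakerPayment("taker funding was refunded".to_string())
        },
        Ok(None) => NextStep::RetryAfterSecs(30),
        Err(LookupErr::Rpc(e)) => {
            eprintln!("Rpc error {} on funding spend lookup", e);
            NextStep::RetryAfterSecs(30)
        },
        Err(LookupErr::InvalidInputTx(e)) => NextStep::RefundMakerPayment(e),
    }
}

fn main() {
    assert!(matches!(next_step(Ok(None)), NextStep::RetryAfterSecs(30)));
    assert!(matches!(
        next_step(Ok(Some(FundingSpend::TransferredToTakerPayment))),
        NextStep::ProceedToTakerPayment
    ));
}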
-TransitionFrom> for MakerPaymentRefundRequired -{} + TransitionFrom> for MakerPaymentRefundRequired +{ +} #[async_trait] impl State -for MakerPaymentRefundRequired + for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1467,7 +1471,7 @@ for MakerPaymentRefundRequired Err(e) => { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = MakerPaymentRefunded { @@ -1492,7 +1496,7 @@ for MakerPaymentRefundRequired Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - } + }, } } @@ -1522,7 +1526,7 @@ for MakerPaymentRefundRequired Err(e) => { let reason = AbortReason::MakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = MakerPaymentRefunded { @@ -1537,7 +1541,7 @@ for MakerPaymentRefundRequired } impl StorableState -for MakerPaymentRefundRequired + for MakerPaymentRefundRequired { type StateMachine = MakerSwapStateMachine; @@ -1564,13 +1568,14 @@ struct TakerPaymentReceived -TransitionFrom> -for TakerPaymentReceived -{} + TransitionFrom> + for TakerPaymentReceived +{ +} #[async_trait] impl State -for TakerPaymentReceived + for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1611,7 +1616,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::DidNotGetTakerPaymentSpendPreimage(e), }; return Self::change_state(next_state, state_machine).await; - } + }, }; debug!("Received taker payment spend preimage message {:?}", preimage_data); @@ -1641,7 +1646,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::FailedToParseTakerPreimage(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let signature = match state_machine.taker_coin.parse_signature(&preimage_data.signature) { Ok(s) => s, @@ -1654,7 +1659,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::FailedToParseTakerSignature(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let tx_preimage = TxPreimageWithSig { preimage, signature }; @@ -1693,7 +1698,7 @@ for TakerPaymentReceived reason: MakerPaymentRefundReason::TakerPaymentSpendBroadcastFailed(format!("{:?}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( "Spent taker payment {} tx {:02x} during swap {}", @@ -1713,7 +1718,7 @@ for TakerPaymentReceived } impl StorableState -for TakerPaymentReceived + for TakerPaymentReceived { type StateMachine = MakerSwapStateMachine; @@ -1743,12 +1748,13 @@ struct TakerPaymentSpent -TransitionFrom> for TakerPaymentSpent -{} + TransitionFrom> for TakerPaymentSpent +{ +} #[async_trait] impl State -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1758,7 +1764,7 @@ for TakerPaymentSpent } impl StorableState -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = MakerSwapStateMachine; @@ -1823,7 +1829,7 @@ impl Aborted { #[async_trait] impl LastState -for Aborted + for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1836,7 +1842,7 @@ for Aborted } impl StorableState -for Aborted + for Aborted { type StateMachine = MakerSwapStateMachine; @@ -1852,16 +1858,19 @@ impl TransitionFrom> for impl TransitionFrom> for Aborted {} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + 
TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} struct Completed { maker_coin: PhantomData, @@ -1878,7 +1887,7 @@ impl Completed { } impl StorableState -for Completed + for Completed { type StateMachine = MakerSwapStateMachine; @@ -1887,7 +1896,7 @@ for Completed #[async_trait] impl LastState -for Completed + for Completed { type StateMachine = MakerSwapStateMachine; @@ -1900,8 +1909,9 @@ for Completed } impl -TransitionFrom> for Completed -{} + TransitionFrom> for Completed +{ +} struct MakerPaymentRefunded { taker_coin: PhantomData, @@ -1911,7 +1921,7 @@ struct MakerPaymentRefunded { } impl StorableState -for MakerPaymentRefunded + for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1932,7 +1942,7 @@ for MakerPaymentRefunded #[async_trait] impl LastState -for MakerPaymentRefunded + for MakerPaymentRefunded { type StateMachine = MakerSwapStateMachine; @@ -1948,5 +1958,6 @@ for MakerPaymentRefunded } impl -TransitionFrom> for MakerPaymentRefunded -{} + TransitionFrom> for MakerPaymentRefunded +{ +} diff --git a/mm2src/mm2_main/src/lp_swap/swap_lock.rs b/mm2src/mm2_main/src/lp_swap/swap_lock.rs index fe32c35843..12e59e9926 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_lock.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_lock.rs @@ -4,12 +4,10 @@ use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; use uuid::Uuid; -#[cfg(target_arch = "wasm32")] -use common::now_sec; +#[cfg(target_arch = "wasm32")] use common::now_sec; #[cfg(not(target_arch = "wasm32"))] pub use native_lock::SwapLock; -#[cfg(target_arch = "wasm32")] -pub use wasm_lock::SwapLock; +#[cfg(target_arch = "wasm32")] pub use wasm_lock::SwapLock; pub type SwapLockResult = Result>; @@ -43,11 +41,11 @@ mod native_lock { match e { FileLockError::ErrorReadingTimestamp { path, error } => { SwapLockError::ErrorReadingTimestamp(format!("Path: {:?}, Error: {}", path, error)) - } + }, FileLockError::ErrorWritingTimestamp { path, error } | FileLockError::ErrorCreatingLockFile { path, error } => { SwapLockError::ErrorWritingTimestamp(format!("Path: {:?}, Error: {}", path, error)) - } + }, } } } @@ -58,7 +56,12 @@ mod native_lock { #[async_trait] impl SwapLockOps for SwapLock { - async fn lock(ctx: &MmArc, swap_uuid: Uuid, ttl_sec: f64, db_id: Option<&str>) -> SwapLockResult> { + async fn lock( + ctx: &MmArc, + swap_uuid: Uuid, + ttl_sec: f64, + db_id: Option<&str>, + ) -> SwapLockResult> { let lock_path = my_swaps_dir(ctx, db_id).join(format!("{}.lock", swap_uuid)); let file_lock = some_or_return_ok_none!(FileLock::lock(lock_path, ttl_sec)?); @@ -96,7 +99,7 @@ mod wasm_lock { | e @ DbTransactionError::ErrorCountingItems(_) => SwapLockError::ErrorReadingTimestamp(e.to_string()), e @ DbTransactionError::ErrorDeletingItems(_) | e @ DbTransactionError::ErrorUploadingItem(_) => { SwapLockError::ErrorWritingTimestamp(e.to_string()) - } + }, } } } diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 6412d6f944..68e7c86f06 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -111,7 +111,7 @@ pub(super) async fn has_db_record_for( let conn = conn.lock().unwrap(); does_swap_exist(&conn, &id_str, db_id.as_deref()) }) - .await?) + .await?) 
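// Illustrative sketch, not part of the patch above: SwapLock::lock now takes an
// optional db_id and resolves the .lock file inside that account's swaps directory
// via my_swaps_dir(ctx, db_id). A reduced, std-only version of the path resolution;
// the directory names and the "DEFAULT" fallback are assumptions of this sketch,
// the authoritative layout is whatever my_swaps_dir returns.

use std::path::{Path, PathBuf};

fn swap_lock_path(db_root: &Path, db_id: Option<&str>, swap_uuid: &str) -> PathBuf {
    db_root
        .join(db_id.unwrap_or("DEFAULT"))
        .join("SWAPS")
        .join("MY")
        .join(format!("{}.lock", swap_uuid))
}

fn main() {
    let root = Path::new("DB");
    let uuid = "placeholder-swap-uuid";
    println!("{}", swap_lock_path(root, Some("account-rmd160-hex"), uuid).display());
    println!("{}", swap_lock_path(root, None, uuid).display());
}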
} #[cfg(target_arch = "wasm32")] @@ -135,8 +135,8 @@ pub(super) async fn store_swap_event( event: T::Event, db_id: Option<&str>, ) -> MmResult<(), SwapStateMachineError> - where - T::Event: DeserializeOwned + Serialize + Send + 'static, +where + T::Event: DeserializeOwned + Serialize + Send + 'static, { let id_str = id.to_string(); let db_id = db_id.map(|e| e.to_string()); @@ -154,7 +154,7 @@ pub(super) async fn store_swap_event( update_swap_events(&conn, &id_str, &serialized_events, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -218,7 +218,7 @@ pub(super) async fn get_unfinished_swaps_uuids( select_unfinished_swaps_uuids(&conn, swap_type, db_id.as_deref()) .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -251,7 +251,7 @@ pub(super) async fn mark_swap_as_finished( let conn = conn.lock().unwrap(); Ok(set_swap_is_finished(&conn, &id.to_string(), db_id.as_deref())?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -302,7 +302,11 @@ pub(super) fn clean_up_context_impl(ctx: &MmArc, uuid: &Uuid, maker_coin: &str, } } -pub(super) async fn acquire_reentrancy_lock_impl(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> MmResult { +pub(super) async fn acquire_reentrancy_lock_impl( + ctx: &MmArc, + uuid: Uuid, + db_id: Option<&str>, +) -> MmResult { let mut attempts = 0; loop { match SwapLock::lock(ctx, uuid, 40., db_id).await? { @@ -315,7 +319,7 @@ pub(super) async fn acquire_reentrancy_lock_impl(ctx: &MmArc, uuid: Uuid, db_id: attempts += 1; Timer::sleep(40.).await; } - } + }, } } } @@ -342,7 +346,7 @@ pub(super) trait GetSwapCoins { /// Generic function for upgraded swaps kickstart handling. /// It is implemented only for UtxoStandardCoin/UtxoStandardCoin case temporary. 
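// Illustrative sketch, not part of the patch above: store_swap_event follows a
// read-modify-write pattern, scoped by db_id: load the JSON array of events stored
// for the swap, push the new event, and write the whole array back. The
// serialization half of that, with a toy event type; serde and serde_json are the
// crates the real code relies on, the event variants here are invented.

use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
enum DemoSwapEvent {
    Started { uuid: String },
    PaymentSent { tx_hash: String },
}

fn append_event(stored_json: &str, event: DemoSwapEvent) -> serde_json::Result<String> {
    let mut events: Vec<DemoSwapEvent> = serde_json::from_str(stored_json)?;
    events.push(event);
    serde_json::to_string(&events)
}

fn main() -> serde_json::Result<()> {
    let stored = r#"[{"type":"Started","uuid":"placeholder-uuid"}]"#;
    let updated = append_event(stored, DemoSwapEvent::PaymentSent {
        tx_hash: "placeholder-tx-hash".to_string(),
    })?;
    println!("{}", updated);
    Ok(())
}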
pub(super) async fn swap_kickstart_handler< - T: StorableStateMachine>, + T: StorableStateMachine>, >( ctx: MmArc, swap_repr: ::DbRepr, @@ -365,11 +369,11 @@ pub(super) async fn swap_kickstart_handler< uuid, taker_coin_ticker, ); Timer::sleep(1.).await; - } + }, Err(e) => { error!("Error {} on {} find attempt", e, taker_coin_ticker); return; - } + }, }; }; @@ -384,11 +388,11 @@ pub(super) async fn swap_kickstart_handler< uuid, maker_coin_ticker, ); Timer::sleep(1.).await; - } + }, Err(e) => { error!("Error {} on {} find attempt", e, maker_coin_ticker); return; - } + }, }; }; @@ -400,7 +404,7 @@ pub(super) async fn swap_kickstart_handler< maker_coin_ticker, taker_coin_ticker ); return; - } + }, }; let recreate_context = SwapRecreateCtx { maker_coin, taker_coin }; @@ -410,7 +414,7 @@ pub(super) async fn swap_kickstart_handler< Err(e) => { error!("Error {} on trying to recreate the swap {}", e, uuid); return; - } + }, }; if let Err(e) = state_machine.kickstart(state).await { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index d0e98b0377..7ce98efb52 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -4,7 +4,12 @@ use super::pubkey_banning::ban_pubkey_on_failed_swap; use super::swap_lock::{SwapLock, SwapLockOps}; use super::swap_watcher::{watcher_topic, SwapWatcherMsg}; use super::trade_preimage::{TradePreimageRequest, TradePreimageRpcError, TradePreimageRpcResult}; -use super::{broadcast_my_swap_status, broadcast_swap_message, broadcast_swap_msg_every, check_other_coin_balance_for_swap, dex_fee_amount_from_taker_coin, dex_fee_rate, get_locked_amount, recv_swap_msg, swap_topic, wait_for_maker_payment_conf_until, AtomicSwap, LockedAmount, MySwapInfo, NegotiationDataMsg, NegotiationDataV2, NegotiationDataV3, RecoveredSwap, RecoveredSwapAction, SavedSwap, SavedSwapIo, SavedTradeFee, SwapConfirmationsSettings, SwapError, SwapMsg, SwapPubkeys, SwapTxDataMsg, SwapsContext, TransactionIdentifier, WAIT_CONFIRM_INTERVAL_SEC}; +use super::{broadcast_my_swap_status, broadcast_swap_message, broadcast_swap_msg_every, + check_other_coin_balance_for_swap, dex_fee_amount_from_taker_coin, dex_fee_rate, get_locked_amount, + recv_swap_msg, swap_topic, wait_for_maker_payment_conf_until, AtomicSwap, LockedAmount, MySwapInfo, + NegotiationDataMsg, NegotiationDataV2, NegotiationDataV3, RecoveredSwap, RecoveredSwapAction, SavedSwap, + SavedSwapIo, SavedTradeFee, SwapConfirmationsSettings, SwapError, SwapMsg, SwapPubkeys, SwapTxDataMsg, + SwapsContext, TransactionIdentifier, WAIT_CONFIRM_INTERVAL_SEC}; use crate::mm2::lp_network::subscribe_to_topic; use crate::mm2::lp_ordermatch::TakerOrderBuilder; use crate::mm2::lp_swap::swap_v2_common::mark_swap_as_finished; @@ -181,7 +186,7 @@ impl TakerSavedEvent { TakerSwapEvent::MakerPaymentSpendFailed(_) => Some(TakerSwapCommand::PrepareForTakerPaymentRefund), TakerSwapEvent::TakerPaymentWaitRefundStarted { .. 
} => { Some(TakerSwapCommand::PrepareForTakerPaymentRefund) - } + }, TakerSwapEvent::TakerPaymentRefundStarted => Some(TakerSwapCommand::RefundTakerPayment), TakerSwapEvent::TakerPaymentRefunded(_) => Some(TakerSwapCommand::FinalizeTakerPaymentRefund), TakerSwapEvent::TakerPaymentRefundFailed(_) => Some(TakerSwapCommand::Finish), @@ -269,7 +274,7 @@ impl TakerSavedSwap { | TakerSwapEvent::MakerPaymentSpentByWatcher(_) | TakerSwapEvent::MakerPaymentWaitConfirmFailed(_) => { return false; - } + }, _ => (), } } @@ -339,7 +344,7 @@ impl TakerSavedSwap { return ERR!("maker's pubkey is empty"); }; key.to_string() - } + }, _ => return ERR!("Swap must be negotiated to get maker's pubkey"), }, None => return ERR!("Can't get maker's pubkey while there's no Negotiated event"), @@ -406,11 +411,11 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { attempts += 1; Timer::sleep(40.).await; } - } + }, Err(e) => { error!("Swap {} file lock error: {}", uuid, e); return; - } + }, } }; @@ -425,16 +430,16 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { Some(c) => { info!("Swap {} kick started.", uuid); (swap, c) - } + }, None => { warn!("Swap {} has been finished already, aborting.", uuid); return; - } + }, }, Err(e) => { error!("Error loading swap {}: {}", uuid, e); return; - } + }, }, }; @@ -448,7 +453,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { Timer::sleep(30.).await; } } - .fuse(), + .fuse(), ); let ctx = swap.ctx.clone(); @@ -497,7 +502,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { match res.0 { Some(c) => { command = c; - } + }, None => { if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid, running_swap.db_id().as_deref()).await @@ -513,17 +518,16 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { } } break; - } + }, } } } - .fuse(), + .fuse(), ); select! 
{ _swap = swap_fut => (), // swap finished normally _touch = touch_loop => unreachable!("Touch loop can not stop!"), - } - ; + }; } #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] @@ -710,14 +714,14 @@ impl TakerSwapEvent { TakerSwapEvent::MakerPaymentValidateFailed(_) => "Maker payment validate failed...".to_owned(), TakerSwapEvent::MakerPaymentWaitConfirmFailed(_) => { "Maker payment wait for confirmation failed...".to_owned() - } + }, TakerSwapEvent::TakerPaymentSent(_) => "Taker payment sent...".to_owned(), TakerSwapEvent::WatcherMessageSent(_, _) => WATCHER_MESSAGE_SENT_LOG.to_owned(), TakerSwapEvent::TakerPaymentTransactionFailed(_) => "Taker payment transaction failed...".to_owned(), TakerSwapEvent::TakerPaymentDataSendFailed(_) => "Taker payment data send failed...".to_owned(), TakerSwapEvent::TakerPaymentWaitConfirmFailed(_) => { "Taker payment wait for confirmation failed...".to_owned() - } + }, TakerSwapEvent::TakerPaymentSpent(_) => "Taker payment spent...".to_owned(), TakerSwapEvent::TakerPaymentWaitForSpendFailed(_) => "Taker payment wait for spend failed...".to_owned(), TakerSwapEvent::MakerPaymentSpent(_) => "Maker payment spent...".to_owned(), @@ -725,7 +729,7 @@ impl TakerSwapEvent { TakerSwapEvent::MakerPaymentSpendFailed(_) => "Maker payment spend failed...".to_owned(), TakerSwapEvent::TakerPaymentWaitRefundStarted { wait_until } => { format!("Taker payment wait refund till {} started...", wait_until) - } + }, TakerSwapEvent::TakerPaymentRefundStarted => "Taker payment refund started...".to_owned(), TakerSwapEvent::TakerPaymentRefunded(_) => "Taker payment refunded...".to_owned(), TakerSwapEvent::TakerPaymentRefundFailed(_) => "Taker payment refund failed...".to_owned(), @@ -819,7 +823,7 @@ impl TakerSwap { fmt = "Taker swap {} has successfully started", self.uuid ); - } + }, TakerSwapEvent::StartFailed(err) => self.errors.lock().push(err), TakerSwapEvent::Negotiated(data) => { self.maker_payment_lock @@ -835,32 +839,32 @@ impl TakerSwap { if data.taker_coin_swap_contract_addr.is_some() { self.w().data.taker_coin_swap_contract_address = data.taker_coin_swap_contract_addr; } - } + }, TakerSwapEvent::NegotiateFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerFeeSent(tx) => self.w().taker_fee = Some(tx), TakerSwapEvent::TakerFeeSendFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentInstructionsReceived(instructions) => { self.w().payment_instructions = instructions - } + }, TakerSwapEvent::MakerPaymentReceived(tx) => self.w().maker_payment = Some(tx), TakerSwapEvent::MakerPaymentWaitConfirmStarted => (), TakerSwapEvent::MakerPaymentValidatedAndConfirmed => { self.maker_payment_confirmed.store(true, Ordering::Relaxed) - } + }, TakerSwapEvent::MakerPaymentValidateFailed(err) => self.errors.lock().push(err), TakerSwapEvent::MakerPaymentWaitConfirmFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentSent(tx) => self.w().taker_payment = Some(tx), TakerSwapEvent::WatcherMessageSent(maker_payment_spend_preimage, taker_payment_refund_preimage) => { self.w().maker_payment_spend_preimage = maker_payment_spend_preimage; self.w().taker_payment_refund_preimage = taker_payment_refund_preimage; - } + }, TakerSwapEvent::TakerPaymentTransactionFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentDataSendFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentWaitConfirmFailed(err) => self.errors.lock().push(err), TakerSwapEvent::TakerPaymentSpent(data) => { 
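// Illustrative sketch, not part of the patch above: run_taker_swap drives the swap
// as a small command loop: execute the current command, persist the events it
// produced, then either continue with the returned next command or mark the swap
// finished and stop. The loop skeleton with toy types; the names are stand-ins for
// the crate's TakerSwapCommand/TakerSwapEvent machinery.

#[derive(Clone, Copy, Debug)]
enum Command {
    Start,
    SendFee,
    Finish,
}

#[derive(Debug)]
struct Event(&'static str);

/// One step of the toy state machine: returns the events to persist and,
/// optionally, the next command to run.
fn handle(cmd: Command) -> (Option<Command>, Vec<Event>) {
    match cmd {
        Command::Start => (Some(Command::SendFee), vec![Event("Started")]),
        Command::SendFee => (Some(Command::Finish), vec![Event("TakerFeeSent")]),
        Command::Finish => (None, vec![Event("Finished")]),
    }
}

fn main() {
    let mut command = Command::Start;
    let mut journal = Vec::new();
    loop {
        let (next, events) = handle(command);
        journal.extend(events); // stands in for saving each event to the swap DB
        match next {
            Some(c) => command = c,
            None => break, // stands in for mark_swap_as_finished plus cleanup
        }
    }
    println!("{:?}", journal);
}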
self.w().taker_payment_spend = Some(data.transaction); self.w().secret = data.secret; - } + }, TakerSwapEvent::TakerPaymentWaitForSpendFailed(err) => self.errors.lock().push(err), TakerSwapEvent::MakerPaymentSpent(tx) => self.w().maker_payment_spend = Some(tx), TakerSwapEvent::MakerPaymentSpentByWatcher(tx) => self.w().maker_payment_spend = Some(tx), @@ -1033,7 +1037,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!taker_coin.get_fee_to_send_taker_fee {}", e).into(), )])); - } + }, }; let get_sender_trade_fee_fut = self.taker_coin.get_sender_trade_fee(preimage_value, stage); let taker_payment_trade_fee = match get_sender_trade_fee_fut.await { @@ -1042,7 +1046,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!taker_coin.get_sender_trade_fee {}", e).into(), )])); - } + }, }; let maker_payment_spend_trade_fee_fut = self.maker_coin.get_receiver_trade_fee(stage); let maker_payment_spend_trade_fee = match maker_payment_spend_trade_fee_fut.compat().await { @@ -1051,7 +1055,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!maker_coin.get_receiver_trade_fee {}", e).into(), )])); - } + }, }; let params = TakerSwapPreparedParams { @@ -1083,7 +1087,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!maker_coin.current_block {}", e).into(), )])); - } + }, }; let taker_coin_start_block = match self.taker_coin.current_block().compat().await { @@ -1092,7 +1096,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::StartFailed( ERRL!("!taker_coin.current_block {}", e).into(), )])); - } + }, }; let maker_coin_swap_contract_address = self.maker_coin.swap_contract_address(); @@ -1152,7 +1156,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("{:?}", e).into(), )])); - } + }, }; debug!("Received maker negotiation data {:?}", maker_data); @@ -1173,7 +1177,7 @@ impl TakerSwap { maker_data.payment_locktime(), expected_lock_time ) - .into(), + .into(), )])); } @@ -1189,7 +1193,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("!maker_coin.negotiate_swap_contract_addr {}", e).into(), )])); - } + }, }, }; @@ -1205,7 +1209,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("!taker_coin.negotiate_swap_contract_addr {}", e).into(), )])); - } + }, }, }; @@ -1263,7 +1267,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![TakerSwapEvent::NegotiateFailed( ERRL!("{:?}", e).into(), )])); - } + }, }; drop(send_abort_handle); @@ -1310,7 +1314,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerFeeSendFailed(ERRL!("{}", err.get_plain_text_format()).into()), ])); - } + }, }; let tx_hash = transaction.tx_hash(); @@ -1334,7 +1338,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::MakerPaymentValidateFailed(e.to_string().into()), ])); - } + }, }; let msg = SwapMsg::TakerFee(payment_data_msg); @@ -1360,7 +1364,7 @@ impl TakerSwap { ERRL!("Error waiting for 'maker-payment' data: {}", e).into(), ), ])); - } + }, }; drop(abort_send_handle); @@ -1379,9 +1383,9 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::MakerPaymentValidateFailed(e.to_string().into()), ])); - } + }, } - } + }, 
None => None, }; swap_events.push(TakerSwapEvent::TakerPaymentInstructionsReceived(instructions)); @@ -1394,7 +1398,7 @@ impl TakerSwap { ERRL!("Error parsing the 'maker-payment': {:?}", e).into(), ), ])); - } + }, }; let tx_hash = maker_payment.tx_hash(); @@ -1444,7 +1448,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentTransactionFailed(err.into_inner().to_string().into()), ])); - } + }, } } else { None @@ -1551,7 +1555,7 @@ impl TakerSwap { ERRL!("Watcher reward error: {}", err.to_string()).into(), ), ])); - } + }, } } else { None @@ -1586,15 +1590,15 @@ impl TakerSwap { ERRL!("{}", err.get_plain_text_format()).into(), ), ])); - } + }, } - } + }, }, Err(e) => { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentTransactionFailed(ERRL!("{}", e).into()), ])); - } + }, }; let tx_hash = transaction.tx_hash(); @@ -1657,7 +1661,7 @@ impl TakerSwap { Some(taker_payment_refund.tx_hex()), )); info!("{}", WATCHER_MESSAGE_SENT_LOG); - } + }, Err(e) => error!( "The watcher message could not be sent, error creating at least one of the preimages: {}", e.get_plain_text_format() @@ -1762,7 +1766,7 @@ impl TakerSwap { wait_until: self.wait_refund_until(), }, ])); - } + }, }; drop(send_abort_handle); drop(watcher_broadcast_abort_handle); @@ -1785,7 +1789,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentWaitForSpendFailed(ERRL!("{}", e).into()), ])); - } + }, }; Ok((Some(TakerSwapCommand::SpendMakerPayment), vec![ @@ -1837,7 +1841,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::MakerPaymentSpendFailed(ERRL!("{}", err.get_plain_text_format()).into()), ])); - } + }, }; broadcast_p2p_tx_msg( @@ -1904,7 +1908,7 @@ impl TakerSwap { Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - } + }, } } @@ -1942,7 +1946,7 @@ impl TakerSwap { return Ok((Some(TakerSwapCommand::Finish), vec![ TakerSwapEvent::TakerPaymentRefundFailed(ERRL!("{:?}", err.get_plain_text_format()).into()), ])); - } + }, }; broadcast_p2p_tx_msg( @@ -2034,7 +2038,7 @@ impl TakerSwap { }; #[cfg(any(test, feature = "run-docker-tests"))] - let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); + let fail_at = std::env::var("TAKER_FAIL_AT").map(FailAt::from).ok(); let swap = TakerSwap::new( ctx.clone(), @@ -2050,7 +2054,7 @@ impl TakerSwap { data.lock_duration, data.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] - fail_at, + fail_at, ); for saved_event in &saved.events { @@ -2126,14 +2130,14 @@ impl TakerSwap { self.maker_coin.ticker(), tx.tx_hash() ); - } + }, Ok(Some(FoundSwapTxSpend::Refunded(tx))) => { return ERR!( "Maker payment was already refunded by {} tx {:02x}", self.maker_coin.ticker(), tx.tx_hash() ); - } + }, Err(e) => return ERR!("Error {} when trying to find maker payment spend", e), Ok(None) => (), // payment is not spent, continue } @@ -2165,7 +2169,7 @@ impl TakerSwap { Some(tx) => tx.tx_hex(), None => return ERR!("Taker payment is not found, swap is not recoverable"), } - } + }, }; if self.r().taker_payment_spend.is_some() { @@ -2204,7 +2208,7 @@ impl TakerSwap { } return ERR!("{}", err.get_plain_text_format()); - } + }, }; return Ok(RecoveredSwap { @@ -2266,7 +2270,7 @@ impl TakerSwap { } return ERR!("{}", err.get_plain_text_format()); - } + }, }; Ok(RecoveredSwap { @@ -2274,7 +2278,7 @@ impl TakerSwap { coin: 
self.maker_coin.ticker().to_string(), transaction, }) - } + }, FoundSwapTxSpend::Refunded(tx) => ERR!( "Taker payment has been refunded already by transaction {:02x}", tx.tx_hash() @@ -2315,7 +2319,7 @@ impl TakerSwap { } return ERR!("{:?}", err.get_plain_text_format()); - } + }, }; Ok(RecoveredSwap { @@ -2323,7 +2327,7 @@ impl TakerSwap { coin: self.taker_coin.ticker().to_string(), transaction, }) - } + }, } } } @@ -2423,7 +2427,7 @@ pub async fn check_balance_for_taker_swap( taker_payment_trade_fee, maker_payment_spend_trade_fee, } - } + }, }; let taker_fee = TakerFeeAdditionalInfo { @@ -2439,7 +2443,7 @@ pub async fn check_balance_for_taker_swap( params.taker_payment_trade_fee, Some(taker_fee), ) - .await?; + .await?; if !params.maker_payment_spend_trade_fee.paid_from_trading_vol { check_other_coin_balance_for_swap(ctx, other_coin, swap_uuid, params.maker_payment_spend_trade_fee).await?; } @@ -2528,7 +2532,7 @@ pub async fn taker_swap_trade_preimage( Some(prepared_params), stage, ) - .await?; + .await?; let conf_settings = OrderConfirmationsSettings { base_confs: base_coin.required_confirmations(), @@ -2580,10 +2584,10 @@ pub async fn max_taker_vol(ctx: MmArc, req: Json) -> Result>, S Err(e) if e.get_inner().not_sufficient_balance() => { warn!("{}", e); MmNumber::from(0) - } + }, Err(err) => { return ERR!("{}", err); - } + }, }; let res = try_s!(json::to_vec(&json!({ @@ -2754,7 +2758,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::SpentOtherPayment, @@ -2802,7 +2806,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::RefundedMyPayment, @@ -2855,7 +2859,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::SpentOtherPayment, @@ -2899,7 +2903,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::RefundedMyPayment, @@ -2936,7 +2940,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let error = block_on(taker_swap.recover_funds()).unwrap_err(); assert!(error.contains("Too early to refund")); assert!(unsafe { SEARCH_TX_SPEND_CALLED }); @@ -2976,7 +2980,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let actual = block_on(taker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::SpentOtherPayment, @@ -3006,7 +3010,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); assert!(block_on(taker_swap.recover_funds()).is_err()); } @@ -3047,7 +3051,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); assert_eq!(unsafe { SWAP_CONTRACT_ADDRESS_CALLED }, 2); assert_eq!( @@ -3082,7 +3086,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); assert_eq!(unsafe { SWAP_CONTRACT_ADDRESS_CALLED }, 1); let expected_addr = addr_from_str(ETH_SEPOLIA_SWAP_CONTRACT).unwrap(); @@ -3251,7 +3255,7 @@ mod taker_swap_tests { taker_coin, taker_saved_swap, )) - .unwrap(); + .unwrap(); let swaps_ctx = 
SwapsContext::from_ctx(&ctx).unwrap(); let arc = Arc::new(swap); let weak_ref = Arc::downgrade(&arc); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index af98ccecc6..bf0ff750fe 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -44,8 +44,7 @@ cfg_wasm32!( ); // This is needed to have Debug on messages -#[allow(unused_imports)] -use prost::Message; +#[allow(unused_imports)] use prost::Message; /// Negotiation data representation to be stored in DB. #[derive(Clone, Debug, Deserialize, Serialize)] @@ -213,7 +212,7 @@ impl StateMachineStorage for TakerSwapStorage { insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; Ok(()) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -258,7 +257,7 @@ impl StateMachineStorage for TakerSwapStorage { TakerSwapDbRepr::from_sql_row, )?) }) - .await + .await } #[cfg(target_arch = "wasm32")] @@ -432,7 +431,7 @@ pub struct TakerSwapStateMachine -TakerSwapStateMachine + TakerSwapStateMachine { fn maker_payment_conf_timeout(&self) -> u64 { self.started_at + self.lock_duration / 3 } @@ -453,7 +452,7 @@ TakerSwapStateMachine #[async_trait] impl StorableStateMachine -for TakerSwapStateMachine + for TakerSwapStateMachine { type Storage = TakerSwapStorage; type Result = (); @@ -493,12 +492,12 @@ for TakerSwapStateMachine storage: TakerSwapStorage, mut repr: TakerSwapDbRepr, recreate_ctx: Self::RecreateCtx, - ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { + ) -> Result<(RestoredMachine, Box>), Self::RecreateError> { if repr.events.is_empty() { return MmError::err(SwapRecreateError::ReprEventsEmpty); } - let current_state: Box> = match repr.events.remove(repr.events.len() - 1) + let current_state: Box> = match repr.events.remove(repr.events.len() - 1) { TakerSwapEvent::Initialized { maker_coin_start_block, @@ -734,10 +733,10 @@ for TakerSwapStateMachine TakerSwapEvent::Completed => return MmError::err(SwapRecreateError::SwapCompleted), TakerSwapEvent::TakerFundingRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - } + }, TakerSwapEvent::TakerPaymentRefunded { .. } => { return MmError::err(SwapRecreateError::SwapFinishedWithRefund); - } + }, }; let dex_fee = if repr.dex_fee_burn > MmNumber::default() { @@ -825,14 +824,14 @@ for TakerSwapStateMachine .entry(taker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, TakerSwapEvent::TakerFundingSent { .. } => { let swaps_ctx = SwapsContext::from_ctx(&self.ctx).expect("from_ctx should not fail at this point"); let ticker = self.taker_coin.ticker(); if let Some(taker_coin_locked) = swaps_ctx.locked_amounts.lock().unwrap().get_mut(ticker) { taker_coin_locked.retain(|locked| locked.swap_uuid != self.uuid); }; - } + }, TakerSwapEvent::Negotiated { .. } | TakerSwapEvent::TakerFundingRefundRequired { .. } | TakerSwapEvent::MakerPaymentAndFundingSpendPreimgReceived { .. } @@ -872,7 +871,7 @@ for TakerSwapStateMachine .entry(taker_coin_ticker) .or_insert_with(Vec::new) .push(new_locked); - } + }, TakerSwapEvent::TakerFundingSent { .. } | TakerSwapEvent::TakerFundingRefundRequired { .. } | TakerSwapEvent::MakerPaymentAndFundingSpendPreimgReceived { .. 
} @@ -905,14 +904,14 @@ impl Default for Initialize { } impl InitialState -for Initialize + for Initialize { type StateMachine = TakerSwapStateMachine; } #[async_trait] impl State -for Initialize + for Initialize { type StateMachine = TakerSwapStateMachine; @@ -922,7 +921,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_start_block = match state_machine.taker_coin.current_block().compat().await { @@ -930,7 +929,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerCoinBlock(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let total_payment_value = @@ -947,7 +946,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetTakerPaymentFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let maker_payment_spend_fee = match state_machine.maker_coin.get_receiver_trade_fee(stage).compat().await { @@ -955,7 +954,7 @@ for Initialize Err(e) => { let reason = AbortReason::FailedToGetMakerPaymentSpendFee(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let prepared_params = TakerSwapPreparedParams { @@ -978,7 +977,7 @@ for Initialize Some(prepared_params), FeeApproxStage::StartSwap, ) - .await + .await { let reason = AbortReason::BalanceCheckFailure(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; @@ -1009,7 +1008,7 @@ struct Initialized { impl TransitionFrom> for Initialized {} impl StorableState -for Initialized + for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1025,7 +1024,7 @@ for Initialized #[async_trait] impl State -for Initialized + for Initialized { type StateMachine = TakerSwapStateMachine; @@ -1042,7 +1041,7 @@ for Initialized Err(e) => { let reason = AbortReason::DidNotReceiveMakerNegotiation(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; debug!("Received maker negotiation message {:?}", maker_negotiation); @@ -1072,7 +1071,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_htlc_pub_from_maker = match state_machine @@ -1083,7 +1082,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParsePubkey(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let taker_coin_maker_address = match state_machine @@ -1094,7 +1093,7 @@ for Initialized Err(e) => { let reason = AbortReason::FailedToParseAddress(e.to_string()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let unique_data = state_machine.unique_data(); @@ -1135,7 +1134,7 @@ for Initialized Err(e) => { let reason = AbortReason::DidNotReceiveMakerNegotiated(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; drop(abort_handle); @@ -1219,12 +1218,13 @@ struct Negotiated TransitionFrom> -for Negotiated -{} + for Negotiated +{ +} #[async_trait] impl State -for Negotiated + for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1244,7 +1244,7 @@ for Negotiated Err(e) => { let reason = AbortReason::FailedToSendTakerFunding(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; info!( @@ -1265,7 +1265,7 @@ for Negotiated } impl StorableState 
-for Negotiated + for Negotiated { type StateMachine = TakerSwapStateMachine; @@ -1289,7 +1289,7 @@ struct TakerFundingSent State -for TakerFundingSent + for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1329,7 +1329,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::DidNotReceiveMakerPayment(e), }; return Self::change_state(next_state, state_machine).await; - } + }, }; drop(abort_handle); @@ -1346,7 +1346,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::FailedToParseMakerPayment(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let preimage_tx = match state_machine @@ -1363,7 +1363,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::FailedToParseFundingSpendPreimg(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let preimage_sig = match state_machine @@ -1380,7 +1380,7 @@ for TakerFundingSent reason: TakerFundingRefundReason::FailedToParseFundingSpendSig(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let next_state = MakerPaymentAndFundingSpendPreimgReceived { @@ -1399,11 +1399,12 @@ for TakerFundingSent } impl TransitionFrom> -for TakerFundingSent -{} + for TakerFundingSent +{ +} impl StorableState -for TakerFundingSent + for TakerFundingSent { type StateMachine = TakerSwapStateMachine; @@ -1430,12 +1431,13 @@ struct MakerPaymentAndFundingSpendPreimgReceived -TransitionFrom> -for MakerPaymentAndFundingSpendPreimgReceived -{} + TransitionFrom> + for MakerPaymentAndFundingSpendPreimgReceived +{ +} impl StorableState -for MakerPaymentAndFundingSpendPreimgReceived + for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1462,7 +1464,7 @@ for MakerPaymentAndFundingSpendPreimgReceived #[async_trait] impl State -for MakerPaymentAndFundingSpendPreimgReceived + for MakerPaymentAndFundingSpendPreimgReceived { type StateMachine = TakerSwapStateMachine; @@ -1572,7 +1574,7 @@ for MakerPaymentAndFundingSpendPreimgReceived reason: TakerFundingRefundReason::FailedToSendTakerPayment(format!("{:?}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( @@ -1603,17 +1605,19 @@ struct TakerPaymentSent -TransitionFrom> for TakerPaymentSent -{} + TransitionFrom> for TakerPaymentSent +{ +} impl -TransitionFrom> -for TakerPaymentSent -{} + TransitionFrom> + for TakerPaymentSent +{ +} #[async_trait] impl State -for TakerPaymentSent + for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1665,7 +1669,7 @@ for TakerPaymentSent reason: TakerPaymentRefundReason::FailedToGenerateSpendPreimage(e.to_string()), }; return Self::change_state(next_state, state_machine).await; - } + }, }; let preimage_msg = TakerPaymentSpendPreimage { @@ -1702,7 +1706,7 @@ for TakerPaymentSent reason: TakerPaymentRefundReason::MakerDidNotSpendInTime(format!("{}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( "Found taker payment spend {} tx {:02x} during swap {}", @@ -1724,7 +1728,7 @@ for TakerPaymentSent } impl StorableState -for TakerPaymentSent + for TakerPaymentSent { type StateMachine = TakerSwapStateMachine; @@ -1767,21 +1771,24 @@ struct TakerFundingRefundRequired -TransitionFrom> for TakerFundingRefundRequired -{} + TransitionFrom> for TakerFundingRefundRequired +{ +} impl -TransitionFrom> -for TakerFundingRefundRequired -{} + TransitionFrom> + for TakerFundingRefundRequired +{ +} impl -TransitionFrom> for TakerFundingRefundRequired 
-{} + TransitionFrom> for TakerFundingRefundRequired +{ +} #[async_trait] impl State -for TakerFundingRefundRequired + for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1810,7 +1817,7 @@ for TakerFundingRefundRequired Err(e) => { let reason = AbortReason::TakerFundingRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = TakerFundingRefunded { @@ -1824,7 +1831,7 @@ for TakerFundingRefundRequired } impl StorableState -for TakerFundingRefundRequired + for TakerFundingRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1856,16 +1863,18 @@ struct TakerPaymentRefundRequired -TransitionFrom> for TakerPaymentRefundRequired -{} + TransitionFrom> for TakerPaymentRefundRequired +{ +} impl -TransitionFrom> for TakerPaymentRefundRequired -{} + TransitionFrom> for TakerPaymentRefundRequired +{ +} #[async_trait] impl State -for TakerPaymentRefundRequired + for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1887,7 +1896,7 @@ for TakerPaymentRefundRequired Err(e) => { error!("Error {} on can_refund_htlc, retrying in 30 seconds", e); Timer::sleep(30.).await; - } + }, } } @@ -1912,7 +1921,7 @@ for TakerPaymentRefundRequired Err(e) => { let reason = AbortReason::TakerPaymentRefundFailed(e.get_plain_text_format()); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let next_state = TakerPaymentRefunded { @@ -1929,7 +1938,7 @@ for TakerPaymentRefundRequired } impl StorableState -for TakerPaymentRefundRequired + for TakerPaymentRefundRequired { type StateMachine = TakerSwapStateMachine; @@ -1955,13 +1964,14 @@ struct MakerPaymentConfirmed -TransitionFrom> -for MakerPaymentConfirmed -{} + TransitionFrom> + for MakerPaymentConfirmed +{ +} #[async_trait] impl State -for MakerPaymentConfirmed + for MakerPaymentConfirmed { type StateMachine = TakerSwapStateMachine; @@ -1993,7 +2003,7 @@ for MakerPaymentConfirmed reason: TakerFundingRefundReason::FailedToSendTakerPayment(format!("{:?}", e)), }; return Self::change_state(next_state, state_machine).await; - } + }, }; info!( @@ -2015,7 +2025,7 @@ for MakerPaymentConfirmed } impl StorableState -for MakerPaymentConfirmed + for MakerPaymentConfirmed { type StateMachine = TakerSwapStateMachine; @@ -2050,12 +2060,13 @@ struct TakerPaymentSpent -TransitionFrom> for TakerPaymentSpent -{} + TransitionFrom> for TakerPaymentSpent +{ +} #[async_trait] impl State -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2075,7 +2086,7 @@ for TakerPaymentSpent Err(e) => { let reason = AbortReason::CouldNotExtractSecret(e); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; let args = SpendMakerPaymentArgs { @@ -2092,7 +2103,7 @@ for TakerPaymentSpent Err(e) => { let reason = AbortReason::FailedToSpendMakerPayment(format!("{:?}", e)); return Self::change_state(Aborted::new(reason), state_machine).await; - } + }, }; info!( "Spent maker payment {} tx {:02x} during swap {}", @@ -2113,7 +2124,7 @@ for TakerPaymentSpent } impl StorableState -for TakerPaymentSpent + for TakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2148,11 +2159,12 @@ struct MakerPaymentSpent -TransitionFrom> for MakerPaymentSpent -{} + TransitionFrom> for MakerPaymentSpent +{ +} impl StorableState -for MakerPaymentSpent + for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2182,7 +2194,7 @@ for MakerPaymentSpent #[async_trait] impl 
State -for MakerPaymentSpent + for MakerPaymentSpent { type StateMachine = TakerSwapStateMachine; @@ -2232,7 +2244,7 @@ impl Aborted { #[async_trait] impl LastState -for Aborted + for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2245,7 +2257,7 @@ for Aborted } impl StorableState -for Aborted + for Aborted { type StateMachine = TakerSwapStateMachine; @@ -2261,20 +2273,24 @@ impl TransitionFrom> for impl TransitionFrom> for Aborted {} impl TransitionFrom> -for Aborted -{} + for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} impl -TransitionFrom> for Aborted -{} + TransitionFrom> for Aborted +{ +} struct Completed { maker_coin: PhantomData, @@ -2291,7 +2307,7 @@ impl Completed { } impl StorableState -for Completed + for Completed { type StateMachine = TakerSwapStateMachine; @@ -2300,7 +2316,7 @@ for Completed #[async_trait] impl LastState -for Completed + for Completed { type StateMachine = TakerSwapStateMachine; @@ -2313,8 +2329,9 @@ for Completed } impl -TransitionFrom> for Completed -{} + TransitionFrom> for Completed +{ +} struct TakerFundingRefunded { maker_coin: PhantomData, @@ -2324,7 +2341,7 @@ struct TakerFundingRefunded StorableState -for TakerFundingRefunded + for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2345,7 +2362,7 @@ for TakerFundingRefunded #[async_trait] impl LastState -for TakerFundingRefunded + for TakerFundingRefunded { type StateMachine = TakerSwapStateMachine; @@ -2361,8 +2378,9 @@ for TakerFundingRefunded } impl -TransitionFrom> for TakerFundingRefunded -{} + TransitionFrom> for TakerFundingRefunded +{ +} struct TakerPaymentRefunded { maker_coin: PhantomData, @@ -2372,7 +2390,7 @@ struct TakerPaymentRefunded StorableState -for TakerPaymentRefunded + for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2390,7 +2408,7 @@ for TakerPaymentRefunded #[async_trait] impl LastState -for TakerPaymentRefunded + for TakerPaymentRefunded { type StateMachine = TakerSwapStateMachine; @@ -2406,5 +2424,6 @@ for TakerPaymentRefunded } impl -TransitionFrom> for TakerPaymentRefunded -{} + TransitionFrom> for TakerPaymentRefunded +{ +} From 6d51390fbdf4ccd3dff35a35a3d1cb9505c84fb2 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 6 May 2024 17:35:52 +0100 Subject: [PATCH 092/186] sql_connection_pool --- .../coins/hd_wallet/storage/sqlite_storage.rs | 8 +- mm2src/coins/lightning/ln_utils.rs | 1 + mm2src/coins/nft/nft_structs.rs | 4 +- mm2src/common/common.rs | 2 +- mm2src/mm2_core/src/lib.rs | 1 + mm2src/mm2_core/src/mm_ctx.rs | 123 +++--------- mm2src/mm2_core/src/sql_connection_pool.rs | 146 ++++++++++++++ mm2src/mm2_main/src/lp_native_dex.rs | 23 ++- mm2src/mm2_main/src/lp_swap.rs | 181 +++++++++--------- mm2src/mm2_main/src/ordermatch_tests.rs | 7 +- mm2src/mm2_test_helpers/src/for_tests.rs | 14 +- mm2src/mm2_test_helpers/src/lib.rs | 3 +- 12 files changed, 291 insertions(+), 222 deletions(-) create mode 100644 mm2src/mm2_core/src/sql_connection_pool.rs diff --git a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs index 898f4c8823..a469ab5f18 100644 --- a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs +++ b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs @@ -101,11 +101,11 @@ impl HDWalletStorageInternalOps for HDWalletSqliteStorage { where Self: Sized, { - let shared = ctx.shared_sqlite_conn.as_option().or_mm_err(|| { + let shared = 
ctx.shared_sqlite_conn_opt(None).or_mm_err(|| { HDWalletStorageError::Internal("'MmCtx::shared_sqlite_conn' is not initialized".to_owned()) })?; let storage = HDWalletSqliteStorage { - conn: SqliteConnShared::downgrade(shared), + conn: SqliteConnShared::downgrade(&shared), }; storage.init_tables().await?; Ok(storage) @@ -279,7 +279,9 @@ pub(crate) async fn get_all_storage_items(ctx: &MmArc) -> Vec| HDAccountStorageItem::try_from(row)) diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 65640ff7db..94c86f76a2 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -69,6 +69,7 @@ pub async fn init_persister( } pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult { + // TODO db_id let shared = ctx.sqlite_conn_opt(None).or_mm_err(|| { EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) })?; diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index e03804aeb7..e0ff2881eb 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -25,7 +25,7 @@ cfg_native! { use db_common::async_sql_conn::AsyncConnection; use db_common::AsyncConnectionCtx; use futures::lock::Mutex as AsyncMutex; - use mm2_core::mm_ctx::{log_sqlite_file_open_attempt, ASYNC_SQLITE_DB_ID}; + use mm2_core::mm_ctx::log_sqlite_file_open_attempt; } cfg_wasm32! { @@ -777,7 +777,7 @@ impl NftCtx { } // else create and return new connection. - let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); + let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join("KOMODEFI.db"); log_sqlite_file_open_attempt(&sqlite_file_path); let async_conn = AsyncConnection::open(sqlite_file_path) .await diff --git a/mm2src/common/common.rs b/mm2src/common/common.rs index ce01798398..febc06a0bb 100644 --- a/mm2src/common/common.rs +++ b/mm2src/common/common.rs @@ -970,7 +970,7 @@ fn test_is_acceptable_input_on_repeated_characters() { assert!(is_acceptable_input_on_repeated_characters("SuperStrongPassword123*", 3)); assert!(!is_acceptable_input_on_repeated_characters( "SuperStrongaaaPassword123*", - 3 + 3, )); } diff --git a/mm2src/mm2_core/src/lib.rs b/mm2src/mm2_core/src/lib.rs index 3eb5ecc6ae..db09c4fe55 100644 --- a/mm2src/mm2_core/src/lib.rs +++ b/mm2src/mm2_core/src/lib.rs @@ -3,6 +3,7 @@ use rand::{thread_rng, Rng}; pub mod event_dispatcher; pub mod mm_ctx; +#[cfg(not(target_arch = "wasm32"))] pub mod sql_connection_pool; #[derive(Clone, Copy, Display, PartialEq)] pub enum DbNamespaceId { diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index f8be7d0cc6..c27f4a4ec3 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -1,9 +1,11 @@ +use crate::sql_connection_pool::{SqliteConnPool, ASYNC_SQLITE_DB_ID}; #[cfg(feature = "track-ctx-pointer")] use common::executor::Timer; use common::executor::{abortable_queue::{AbortableQueue, WeakSpawner}, graceful_shutdown, AbortSettings, AbortableSystem, SpawnAbortable, SpawnFuture}; use common::log::{self, LogLevel, LogOnError, LogState}; use common::{cfg_native, cfg_wasm32, small_rng}; +use db_common::sqlite::rusqlite::Connection; use gstuff::{try_s, Constructible, ERR, ERRL}; use lazy_static::lazy_static; use mm2_event_stream::{controller::Controller, Event, EventStreamConfiguration}; @@ -28,23 +30,16 @@ cfg_wasm32! { cfg_native! 
{ use db_common::AsyncConnectionCtx; use db_common::async_sql_conn::AsyncConnection; - use db_common::sqlite::rusqlite::Connection; use futures::lock::Mutex as AsyncMutex; use rustls::ServerName; use mm2_metrics::prometheus; use mm2_metrics::MmMetricsError; use std::net::{IpAddr, SocketAddr, AddrParseError}; use std::path::{Path, PathBuf}; - use std::sync::MutexGuard; } /// Default interval to export and record metrics to log. const EXPORT_METRICS_INTERVAL: f64 = 5. * 60.; -pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; -pub const SYNC_SQLITE_DB_ID: &str = "MM2.db"; - -#[cfg(not(target_arch = "wasm32"))] -pub type SyncSqliteConnectionArc = Arc>; /// MarketMaker state, shared between the various MarketMaker threads. /// @@ -65,6 +60,7 @@ pub type SyncSqliteConnectionArc = Arc>; /// Only the pointers (`MmArc`, `MmWeak`) can be moved around. /// /// Threads only have the non-`mut` access to `MmCtx`, allowing us to directly share certain fields. +#[allow(clippy::type_complexity)] pub struct MmCtx { /// MM command-line configuration. pub conf: Json, @@ -122,12 +118,9 @@ pub struct MmCtx { /// The RPC sender forwarding requests to writing part of underlying stream. #[cfg(target_arch = "wasm32")] pub wasm_rpc: Constructible, - /// Deprecated, please use `async_sqlite_connection` for new implementations. + /// Deprecated, please create `shared_async_sqlite_conn` for new implementations and call db `KOMODEFI-shared.db` for shared_db. #[cfg(not(target_arch = "wasm32"))] - pub sqlite_connection: Constructible>>>, - /// Deprecated, please create `shared_async_sqlite_conn` for new implementations and call db `KOMODEFI-shared.db`. - #[cfg(not(target_arch = "wasm32"))] - pub shared_sqlite_conn: Constructible>>, + pub sqlite_conn_pool: Constructible, /// asynchronous handle for rusqlite connection. #[cfg(not(target_arch = "wasm32"))] pub async_sqlite_connection: Constructible>>, @@ -181,9 +174,7 @@ impl MmCtx { #[cfg(target_arch = "wasm32")] wasm_rpc: Constructible::default(), #[cfg(not(target_arch = "wasm32"))] - sqlite_connection: Constructible::default(), - #[cfg(not(target_arch = "wasm32"))] - shared_sqlite_conn: Constructible::default(), + sqlite_conn_pool: Constructible::default(), #[cfg(not(target_arch = "wasm32"))] async_sqlite_connection: Constructible::default(), mm_version: "".into(), @@ -322,8 +313,10 @@ impl MmCtx { /// /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. 
#[cfg(not(target_arch = "wasm32"))] - pub fn shared_dbdir(&self) -> PathBuf { - let db_id = hex::encode(self.shared_db_id().as_slice()); + pub fn shared_dbdir(&self, db_id: Option<&str>) -> PathBuf { + let db_id = db_id + .map(|d| d.to_owned()) + .unwrap_or_else(|| hex::encode(self.shared_db_id().as_slice())); path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) } @@ -356,29 +349,6 @@ impl MmCtx { pub fn mm_version(&self) -> &str { &self.mm_version } - #[cfg(not(target_arch = "wasm32"))] - pub fn init_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { - let sqlite_file_path = self.dbdir(db_id).join(SYNC_SQLITE_DB_ID); - log_sqlite_file_open_attempt(&sqlite_file_path); - - let connection = try_s!(Connection::open(sqlite_file_path)); - let mut store = HashMap::new(); - store.insert(self.rmd160_hex(), Arc::new(Mutex::new(connection))); - try_s!(self.sqlite_connection.pin(Arc::new(Mutex::new(store)))); - - Ok(()) - } - - #[cfg(not(target_arch = "wasm32"))] - pub fn init_shared_sqlite_conn(&self) -> Result<(), String> { - let sqlite_file_path = self.shared_dbdir().join("MM2-shared.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let connection = try_s!(Connection::open(sqlite_file_path)); - try_s!(self.shared_sqlite_conn.pin(Arc::new(Mutex::new(connection)))); - - Ok(()) - } - #[cfg(not(target_arch = "wasm32"))] pub async fn init_async_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); @@ -397,73 +367,34 @@ impl MmCtx { } #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option { - if let Some(connections) = self.sqlite_connection.as_option() { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); - let connections = connections.lock().unwrap(); - return if let Some(connection) = connections.get(&db_id) { - Some(connection.clone()) - } else { - let sqlite_file_path = self.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID); - log_sqlite_file_open_attempt(&sqlite_file_path); - - let connection = Arc::new(Mutex::new( - Connection::open(sqlite_file_path).expect("failed to open db"), - )); - let mut store = HashMap::new(); - store.insert(db_id, connection.clone()); - drop(connections); - - // TODO: run migration and fix directions - Some(connection) - }; - }; - - None + pub fn sqlite_conn_pool(&self) -> SqliteConnPool { + self.sqlite_conn_pool + .or(&|| panic!("sqlite_connection is not initialized")) + .clone() } #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_connection(&self, db_id: Option<&str>) -> SyncSqliteConnectionArc { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); - let connections = self - .sqlite_connection - .or(&|| panic!("sqlite_connection is not initialized")) - .lock() - .unwrap(); - return if let Some(connection) = connections.get(&db_id) { - connection.clone() - } else { - let sqlite_file_path = self.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID); - log_sqlite_file_open_attempt(&sqlite_file_path); - - let connection = Arc::new(Mutex::new( - Connection::open(sqlite_file_path).expect("failed to open db"), - )); - let mut store = HashMap::new(); - store.insert(db_id, connection.clone()); - drop(connections); - // TODO: run migration and fix directions - connection + pub fn shared_sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { + if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { + return pool.shared_sqlite_conn_opt(self, db_id); }; + + None } 
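For context, a minimal usage sketch of the pool-backed accessors introduced in this patch (not part of the diff above): it assumes the accessors hand back the usual `Arc<Mutex<Connection>>` from `db_common::sqlite::rusqlite`, and the helper name and the `my_swaps` query are illustrative only.

use db_common::sqlite::rusqlite::Connection;
use mm2_core::mm_ctx::MmArc;
use std::sync::{Arc, Mutex};

/// Illustrative helper (native targets only): run a query against the
/// per-account DB selected by `db_id`. Passing `None` falls back to the
/// rmd160-derived default, mirroring the pool's own `db_id` resolution.
fn swaps_count_for_account(ctx: &MmArc, db_id: Option<&str>) -> Result<i64, String> {
    // `sqlite_connection` panics if the pool was never initialized;
    // prefer `sqlite_conn_opt` when the pool may not be set up yet.
    let conn_arc: Arc<Mutex<Connection>> = ctx.sqlite_connection(db_id);
    let conn = conn_arc.lock().unwrap();
    conn.query_row("SELECT COUNT(*) FROM my_swaps", [], |row| row.get(0))
        .map_err(|e| e.to_string())
}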
#[cfg(not(target_arch = "wasm32"))] - pub fn init_sqlite_connection_for_test(&self, db_id: Option<&str>) -> Result { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); - let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - let mut store = HashMap::new(); - store.insert(db_id, connection.clone()); - try_s!(self.sqlite_connection.pin(Arc::new(Mutex::new(store)))); + pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { + if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { + return pool.sqlite_conn_opt(self, db_id); + }; - Ok(connection) + None } #[cfg(not(target_arch = "wasm32"))] - pub fn shared_sqlite_conn(&self) -> MutexGuard { - self.shared_sqlite_conn - .or(&|| panic!("shared_sqlite_conn is not initialized")) - .lock() - .unwrap() + pub fn sqlite_connection(&self, db_id: Option<&str>) -> Arc> { + let pool = self.sqlite_conn_pool(); + pool.sqlite_conn(self, db_id) } } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs new file mode 100644 index 0000000000..63a5273fa3 --- /dev/null +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -0,0 +1,146 @@ +use crate::mm_ctx::{log_sqlite_file_open_attempt, MmCtx}; +use db_common::sqlite::rusqlite::Connection; +use gstuff::try_s; +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; + +pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; +const SYNC_SQLITE_DB_ID: &str = "MM2.db"; + +enum DbIdConnKind { + Shared, + Single, +} + +#[derive(Clone)] +pub struct SqliteConnPool(Arc>>>>); + +impl SqliteConnPool { + pub fn init(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { + Self::init_impl(ctx, db_id, DbIdConnKind::Single) + } + + pub fn init_shared(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { + Self::init_impl(ctx, db_id, DbIdConnKind::Shared) + } + + fn init_impl(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { + let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); + + match ctx.sqlite_conn_pool.as_option() { + // if connection pool is not already initialized, create new connection pool. + None => { + let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); + let store = Arc::new(Mutex::new(HashMap::from([(db_id, conn)]))); + try_s!(ctx.sqlite_conn_pool.pin(Self(store))); + }, + // if connection pool is already initialized, insert new connection. 
+ Some(pool) => { + let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); + let mut pool = pool.0.lock().unwrap(); + pool.insert(db_id, conn); + }, + }; + + Ok(()) + } + + pub fn init_test(ctx: &MmCtx) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Single) } + + pub fn init_shared_test(ctx: &MmCtx) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Shared) } + + fn init_impl_test(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { + let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); + + match ctx.sqlite_conn_pool.as_option() { + None => { + let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); + let store = Arc::new(Mutex::new(HashMap::from([(db_id, connection)]))); + try_s!(ctx.sqlite_conn_pool.pin(Self(store))); + }, + Some(pool) => { + let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); + let mut pool = pool.0.lock().unwrap(); + pool.insert(db_id, connection); + }, + } + Ok(()) + } + + pub fn sqlite_conn(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { + Self::sqlite_conn_impl(ctx, db_id, DbIdConnKind::Single) + } + + pub fn sqlite_conn_shared(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { + Self::sqlite_conn_impl(ctx, db_id, DbIdConnKind::Shared) + } + + pub fn sqlite_conn_opt(&self, ctx: &MmCtx, db_id: Option<&str>) -> Option>> { + Self::sqlite_conn_opt_impl(ctx, db_id, DbIdConnKind::Single) + } + + pub fn shared_sqlite_conn_opt(&self, ctx: &MmCtx, db_id: Option<&str>) -> Option>> { + Self::sqlite_conn_opt_impl(ctx, db_id, DbIdConnKind::Shared) + } + + fn sqlite_conn_opt_impl( + ctx: &MmCtx, + db_id: Option<&str>, + db_id_conn_kind: DbIdConnKind, + ) -> Option>> { + if let Some(connections) = ctx.sqlite_conn_pool.as_option() { + let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); + let mut connections = connections.0.lock().unwrap(); + return if let Some(connection) = connections.get(&db_id) { + Some(connection.clone()) + } else { + let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); + connections.insert(db_id, conn.clone()); + // TODO: run migration and fix directions + Some(conn) + }; + }; + + None + } + + fn sqlite_conn_impl(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Arc> { + let mut connections = ctx + .sqlite_conn_pool + .or(&|| panic!("sqlite_conn_pool is not initialized")) + .0 + .lock() + .unwrap(); + + let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); + return if let Some(connection) = connections.get(&db_id) { + connection.clone() + } else { + let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); + connections.insert(db_id, conn.clone()); + // TODO: run migration and fix directions + conn + }; + } + + fn db_id(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: &DbIdConnKind) -> String { + let db_id_default = match db_id_conn_kind { + DbIdConnKind::Shared => hex::encode(ctx.shared_db_id().as_slice()), + DbIdConnKind::Single => ctx.rmd160_hex(), + }; + + db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default) + } + + fn open_connection(ctx: &MmCtx, db_id: &str, db_id_conn_kind: &DbIdConnKind) -> Arc> { + let sqlite_file_path = match db_id_conn_kind { + DbIdConnKind::Shared => ctx.shared_dbdir(Some(db_id)).join("MM2-shared.db"), + DbIdConnKind::Single => ctx.dbdir(Some(db_id)).join(SYNC_SQLITE_DB_ID), + }; + + log_sqlite_file_open_attempt(&sqlite_file_path); + Arc::new(Mutex::new( + Connection::open(sqlite_file_path).expect("failed to open db"), + )) + } +} diff --git 
a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 18344e3916..817bc53ee1 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -26,6 +26,7 @@ use crypto::{from_hw_error, CryptoCtx, HwError, HwProcessingError, HwRpcError, W use derive_more::Display; use enum_derives::EnumFromTrait; use mm2_core::mm_ctx::{MmArc, MmCtx}; +use mm2_core::sql_connection_pool::SqliteConnPool; use mm2_err_handle::common_errors::InternalError; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; @@ -332,8 +333,8 @@ fn default_seednodes(netid: u16) -> Vec { } #[cfg(not(target_arch = "wasm32"))] -pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>) -> MmInitResult<()> { - fix_shared_dbdir(ctx)?; +pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>, shared_db_id: Option<&str>) -> MmInitResult<()> { + fix_shared_dbdir(ctx, shared_db_id)?; let dbdir = ctx.dbdir(db_id); fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { @@ -393,8 +394,8 @@ pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>) -> MmInitResult<()> { } #[cfg(not(target_arch = "wasm32"))] -fn fix_shared_dbdir(ctx: &MmCtx) -> MmInitResult<()> { - let shared_db = ctx.shared_dbdir(); +fn fix_shared_dbdir(ctx: &MmCtx, db_id: Option<&str>) -> MmInitResult<()> { + let shared_db = ctx.shared_dbdir(db_id); fs::create_dir_all(&shared_db).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { path: shared_db.clone(), error: e.to_string(), @@ -463,11 +464,9 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - fix_directories(&ctx, None)?; - ctx.init_sqlite_connection(None) - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_shared_sqlite_conn() - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + fix_directories(&ctx, None, None)?; + SqliteConnPool::init(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + SqliteConnPool::init_shared(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.init_async_sqlite_connection(None) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; @@ -484,7 +483,7 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { // launch kickstart threads before RPC is available, this will prevent the API user to place // an order and start new swap that might get started 2 times because of kick-start - kick_start(ctx.clone()).await?; + kick_start(ctx.clone(), None).await?; init_event_streaming(&ctx).await?; @@ -543,8 +542,8 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes Ok(()) } -async fn kick_start(ctx: MmArc) -> MmInitResult<()> { - let mut coins_needed_for_kick_start = swap_kick_starts(ctx.clone()) +async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { + let mut coins_needed_for_kick_start = swap_kick_starts(ctx.clone(), db_id) .await .map_to_mm(MmInitError::SwapsKickStartError)?; coins_needed_for_kick_start.extend( diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 888df21db0..ec2867a87e 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1399,100 +1399,94 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result Result, String> { - let db_ids = try_s!(find_unique_account_ids_active(&ctx).await); +pub async fn swap_kick_starts(ctx: MmArc, db_id: Option<&str>) -> Result, String> { let mut coins = HashSet::new(); + #[cfg(target_arch = "wasm32")] + 
try_s!(migrate_swaps_data(&ctx, db_id).await); - for db_id in db_ids { - let db_id = Some(db_id); - #[cfg(target_arch = "wasm32")] - try_s!(migrate_swaps_data(&ctx, db_id.as_deref()).await); - - let legacy_unfinished_uuids = - try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE, db_id.as_deref()).await); - for uuid in legacy_unfinished_uuids { - let swap = match SavedSwap::load_my_swap_from_db(&ctx, db_id.as_deref(), uuid).await { - Ok(Some(s)) => s, - Ok(None) => { - warn!("Swap {} is indexed, but doesn't exist in DB", uuid); - continue; - }, - Err(e) => { - error!("Error {} on getting swap {} data from DB", e, uuid); - continue; - }, - }; - info!("Kick starting the swap {}", swap.uuid()); - let maker_coin_ticker = match swap.maker_coin_ticker() { - Ok(t) => t, - Err(e) => { - error!("Error {} getting maker coin of swap: {}", e, swap.uuid()); - continue; - }, - }; - let taker_coin_ticker = match swap.taker_coin_ticker() { - Ok(t) => t, - Err(e) => { - error!("Error {} getting taker coin of swap {}", e, swap.uuid()); - continue; - }, - }; - coins.insert(maker_coin_ticker.clone()); - coins.insert(taker_coin_ticker.clone()); + let legacy_unfinished_uuids = try_s!(get_unfinished_swaps_uuids(ctx.clone(), LEGACY_SWAP_TYPE, db_id).await); + for uuid in legacy_unfinished_uuids { + let swap = match SavedSwap::load_my_swap_from_db(&ctx, db_id, uuid).await { + Ok(Some(s)) => s, + Ok(None) => { + warn!("Swap {} is indexed, but doesn't exist in DB", uuid); + continue; + }, + Err(e) => { + error!("Error {} on getting swap {} data from DB", e, uuid); + continue; + }, + }; + info!("Kick starting the swap {}", swap.uuid()); + let maker_coin_ticker = match swap.maker_coin_ticker() { + Ok(t) => t, + Err(e) => { + error!("Error {} getting maker coin of swap: {}", e, swap.uuid()); + continue; + }, + }; + let taker_coin_ticker = match swap.taker_coin_ticker() { + Ok(t) => t, + Err(e) => { + error!("Error {} getting taker coin of swap {}", e, swap.uuid()); + continue; + }, + }; + coins.insert(maker_coin_ticker.clone()); + coins.insert(taker_coin_ticker.clone()); - let fut = kickstart_thread_handler(ctx.clone(), swap, maker_coin_ticker, taker_coin_ticker); - ctx.spawner().spawn(fut); - } + let fut = kickstart_thread_handler(ctx.clone(), swap, maker_coin_ticker, taker_coin_ticker); + ctx.spawner().spawn(fut); + } - let maker_swap_storage = MakerSwapStorage::new(ctx.clone(), db_id.as_deref()); - let unfinished_maker_uuids = try_s!(maker_swap_storage.get_unfinished().await); - for maker_uuid in unfinished_maker_uuids { - info!("Trying to kickstart maker swap {}", maker_uuid); - let maker_swap_repr = match maker_swap_storage.get_repr(maker_uuid).await { - Ok(repr) => repr, - Err(e) => { - error!("Error {} getting DB repr of maker swap {}", e, maker_uuid); - continue; - }, - }; - debug!("Got maker swap repr {:?}", maker_swap_repr); - - coins.insert(maker_swap_repr.maker_coin.clone()); - coins.insert(maker_swap_repr.taker_coin.clone()); - - let fut = swap_kickstart_handler::>( - ctx.clone(), - maker_swap_repr, - maker_swap_storage.clone(), - maker_uuid, - ); - ctx.spawner().spawn(fut); - } + let maker_swap_storage = MakerSwapStorage::new(ctx.clone(), db_id); + let unfinished_maker_uuids = try_s!(maker_swap_storage.get_unfinished().await); + for maker_uuid in unfinished_maker_uuids { + info!("Trying to kickstart maker swap {}", maker_uuid); + let maker_swap_repr = match maker_swap_storage.get_repr(maker_uuid).await { + Ok(repr) => repr, + Err(e) => { + error!("Error {} getting DB repr of maker swap {}", e, 
maker_uuid); + continue; + }, + }; + debug!("Got maker swap repr {:?}", maker_swap_repr); - let taker_swap_storage = TakerSwapStorage::new(ctx.clone(), db_id.as_deref()); - let unfinished_taker_uuids = try_s!(taker_swap_storage.get_unfinished().await); - for taker_uuid in unfinished_taker_uuids { - info!("Trying to kickstart taker swap {}", taker_uuid); - let taker_swap_repr = match taker_swap_storage.get_repr(taker_uuid).await { - Ok(repr) => repr, - Err(e) => { - error!("Error {} getting DB repr of taker swap {}", e, taker_uuid); - continue; - }, - }; - debug!("Got taker swap repr {:?}", taker_swap_repr); - - coins.insert(taker_swap_repr.maker_coin.clone()); - coins.insert(taker_swap_repr.taker_coin.clone()); - - let fut = swap_kickstart_handler::>( - ctx.clone(), - taker_swap_repr, - taker_swap_storage.clone(), - taker_uuid, - ); - ctx.spawner().spawn(fut); - } + coins.insert(maker_swap_repr.maker_coin.clone()); + coins.insert(maker_swap_repr.taker_coin.clone()); + + let fut = swap_kickstart_handler::>( + ctx.clone(), + maker_swap_repr, + maker_swap_storage.clone(), + maker_uuid, + ); + ctx.spawner().spawn(fut); + } + + let taker_swap_storage = TakerSwapStorage::new(ctx.clone(), db_id); + let unfinished_taker_uuids = try_s!(taker_swap_storage.get_unfinished().await); + for taker_uuid in unfinished_taker_uuids { + info!("Trying to kickstart taker swap {}", taker_uuid); + let taker_swap_repr = match taker_swap_storage.get_repr(taker_uuid).await { + Ok(repr) => repr, + Err(e) => { + error!("Error {} getting DB repr of taker swap {}", e, taker_uuid); + continue; + }, + }; + debug!("Got taker swap repr {:?}", taker_swap_repr); + + coins.insert(taker_swap_repr.maker_coin.clone()); + coins.insert(taker_swap_repr.taker_coin.clone()); + + let fut = swap_kickstart_handler::>( + ctx.clone(), + taker_swap_repr, + taker_swap_storage.clone(), + taker_uuid, + ); + ctx.spawner().spawn(fut); } Ok(coins) @@ -1899,6 +1893,7 @@ mod lp_swap_tests { use coins::PrivKeyActivationPolicy; use common::{block_on, new_uuid}; use mm2_core::mm_ctx::MmCtxBuilder; + use mm2_core::sql_connection_pool::SqliteConnPool; use mm2_test_helpers::for_tests::{morty_conf, rick_conf, MORTY_ELECTRUM_ADDRS, RICK_ELECTRUM_ADDRS}; #[test] @@ -2322,9 +2317,9 @@ mod lp_swap_tests { .unwrap() .mm2_internal_key_pair(); - fix_directories(&maker_ctx, None).unwrap(); + fix_directories(&maker_ctx, None, None).unwrap(); block_on(init_p2p(maker_ctx.clone())).unwrap(); - maker_ctx.init_sqlite_connection(None).unwrap(); + SqliteConnPool::init_test(&maker_ctx).unwrap(); let rick_activation_params = utxo_activation_params(RICK_ELECTRUM_ADDRS); let morty_activation_params = utxo_activation_params(MORTY_ELECTRUM_ADDRS); @@ -2360,9 +2355,9 @@ mod lp_swap_tests { .unwrap() .mm2_internal_key_pair(); - fix_directories(&taker_ctx, None).unwrap(); + fix_directories(&taker_ctx, None, None).unwrap(); block_on(init_p2p(taker_ctx.clone())).unwrap(); - taker_ctx.init_sqlite_connection(None).unwrap(); + SqliteConnPool::init_test(&taker_ctx).unwrap(); let rick_taker = block_on(utxo_standard_coin_with_priv_key( &taker_ctx, diff --git a/mm2src/mm2_main/src/ordermatch_tests.rs b/mm2src/mm2_main/src/ordermatch_tests.rs index 01ebda2871..35bc290692 100644 --- a/mm2src/mm2_main/src/ordermatch_tests.rs +++ b/mm2src/mm2_main/src/ordermatch_tests.rs @@ -5,6 +5,7 @@ use common::{block_on, executor::spawn}; use crypto::privkey::key_pair_from_seed; use futures::{channel::mpsc, StreamExt}; use mm2_core::mm_ctx::{MmArc, MmCtx}; +use 
mm2_core::sql_connection_pool::SqliteConnPool; use mm2_libp2p::AdexBehaviourCmd; use mm2_libp2p::{decode_message, PeerId}; use mm2_net::p2p::P2PContext; @@ -1066,7 +1067,7 @@ fn test_cancel_by_single_coin() { let ctx = mm_ctx_with_iguana(None); let rx = prepare_for_cancel_by(&ctx); - let _ = ctx.init_sqlite_connection_for_test(None); + SqliteConnPool::init_test(&ctx).unwrap(); delete_my_maker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); delete_my_taker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); @@ -1084,7 +1085,7 @@ fn test_cancel_by_pair() { let ctx = mm_ctx_with_iguana(None); let rx = prepare_for_cancel_by(&ctx); - let _ = ctx.init_sqlite_connection_for_test(None); + SqliteConnPool::init_test(&ctx).unwrap(); delete_my_maker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); delete_my_taker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); @@ -1106,7 +1107,7 @@ fn test_cancel_by_all() { let ctx = mm_ctx_with_iguana(None); let rx = prepare_for_cancel_by(&ctx); - let _ = ctx.init_sqlite_connection_for_test(None); + SqliteConnPool::init_test(&ctx).unwrap(); delete_my_maker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); delete_my_taker_order.mock_safe(|_, _, _| MockResult::Return(Box::new(futures01::future::ok(())))); diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index ddddb3e845..4655b5121e 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -28,6 +28,7 @@ use std::num::NonZeroUsize; use std::process::Child; use std::sync::Mutex; use uuid::Uuid; +use mm2_core::sql_connection_pool::SqliteConnPool; cfg_native! 
{ use common::block_on; @@ -1084,22 +1085,13 @@ pub fn mm_ctx_with_custom_db() -> MmArc { mm_ctx_with_custom_db_with_conf(None) #[cfg(not(target_arch = "wasm32"))] pub fn mm_ctx_with_custom_db_with_conf(conf: Option) -> MmArc { - use db_common::sqlite::rusqlite::Connection; - use std::sync::Arc; - let mut ctx_builder = MmCtxBuilder::new(); if let Some(conf) = conf { ctx_builder = ctx_builder.with_conf(conf); } let ctx = ctx_builder.into_mm_arc(); - - let mut connections = HashMap::new(); - let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - connections.insert(ctx.rmd160_hex(), connection); - let _ = ctx.sqlite_connection.pin(Arc::new(Mutex::new(connections))); - - let connection = Connection::open_in_memory().unwrap(); - let _ = ctx.shared_sqlite_conn.pin(Arc::new(Mutex::new(connection))); + let _ = SqliteConnPool::init_test(&ctx).unwrap(); + let _ = SqliteConnPool::init_shared_test(&ctx).unwrap(); ctx } diff --git a/mm2src/mm2_test_helpers/src/lib.rs b/mm2src/mm2_test_helpers/src/lib.rs index 92da032370..2a09433eef 100644 --- a/mm2src/mm2_test_helpers/src/lib.rs +++ b/mm2src/mm2_test_helpers/src/lib.rs @@ -1,6 +1,7 @@ #![feature(result_flattening)] -#[macro_use] extern crate serde_derive; +#[macro_use] +extern crate serde_derive; pub mod electrums; pub mod for_tests; From a4c27b99a853a72a6e2ef715e25391cda0065109 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 6 May 2024 17:43:50 +0100 Subject: [PATCH 093/186] fix wasm clippy and add doc comments --- mm2src/mm2_core/src/mm_ctx.rs | 6 +++--- mm2src/mm2_main/src/lp_native_dex.rs | 2 +- mm2src/mm2_test_helpers/src/for_tests.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index c27f4a4ec3..87e15e3af6 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -1,11 +1,9 @@ -use crate::sql_connection_pool::{SqliteConnPool, ASYNC_SQLITE_DB_ID}; #[cfg(feature = "track-ctx-pointer")] use common::executor::Timer; use common::executor::{abortable_queue::{AbortableQueue, WeakSpawner}, graceful_shutdown, AbortSettings, AbortableSystem, SpawnAbortable, SpawnFuture}; use common::log::{self, LogLevel, LogOnError, LogState}; use common::{cfg_native, cfg_wasm32, small_rng}; -use db_common::sqlite::rusqlite::Connection; use gstuff::{try_s, Constructible, ERR, ERRL}; use lazy_static::lazy_static; use mm2_event_stream::{controller::Controller, Event, EventStreamConfiguration}; @@ -28,8 +26,10 @@ cfg_wasm32! { } cfg_native! { -use db_common::AsyncConnectionCtx; + use db_common::AsyncConnectionCtx; use db_common::async_sql_conn::AsyncConnection; + use db_common::sqlite::rusqlite::Connection; + use crate::sql_connection_pool::{SqliteConnPool, ASYNC_SQLITE_DB_ID}; use futures::lock::Mutex as AsyncMutex; use rustls::ServerName; use mm2_metrics::prometheus; diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 817bc53ee1..e6dae2f665 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -26,7 +26,6 @@ use crypto::{from_hw_error, CryptoCtx, HwError, HwProcessingError, HwRpcError, W use derive_more::Display; use enum_derives::EnumFromTrait; use mm2_core::mm_ctx::{MmArc, MmCtx}; -use mm2_core::sql_connection_pool::SqliteConnPool; use mm2_err_handle::common_errors::InternalError; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; @@ -62,6 +61,7 @@ cfg_native! 
{ use mm2_io::fs::{ensure_dir_is_writable, ensure_file_is_writable}; use mm2_net::ip_addr::myipaddr; use rustls_pemfile as pemfile; + use mm2_core::sql_connection_pool::SqliteConnPool; } #[path = "lp_init/init_context.rs"] mod init_context; diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 4655b5121e..d8d191149d 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -28,7 +28,6 @@ use std::num::NonZeroUsize; use std::process::Child; use std::sync::Mutex; use uuid::Uuid; -use mm2_core::sql_connection_pool::SqliteConnPool; cfg_native! { use common::block_on; @@ -42,6 +41,7 @@ cfg_native! { use futures::task::SpawnExt; use http::Request; use regex::Regex; + use mm2_core::sql_connection_pool::SqliteConnPool; use std::fs; use std::io::Write; use std::net::Ipv4Addr; From 1c08ef5ce5a56c336f7bfdab24ca40fb8632098b Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 6 May 2024 17:48:38 +0100 Subject: [PATCH 094/186] more doc comments --- mm2src/mm2_core/src/mm_ctx.rs | 17 +++++++++-------- mm2src/mm2_core/src/sql_connection_pool.rs | 17 ++++++++++++++++- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 87e15e3af6..996056f01a 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -366,13 +366,8 @@ impl MmCtx { Ok(()) } - #[cfg(not(target_arch = "wasm32"))] - pub fn sqlite_conn_pool(&self) -> SqliteConnPool { - self.sqlite_conn_pool - .or(&|| panic!("sqlite_connection is not initialized")) - .clone() - } - + /// Retrieves an optional shared connection from the pool for the specified database ID. + /// Returns `None` if the connection pool is not initialized. #[cfg(not(target_arch = "wasm32"))] pub fn shared_sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { @@ -382,6 +377,8 @@ impl MmCtx { None } + /// Retrieves an optional connection from the pool for the specified database ID. + /// Returns `None` if the connection pool is not initialized. #[cfg(not(target_arch = "wasm32"))] pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { @@ -391,9 +388,13 @@ impl MmCtx { None } + /// Obtains a connection from the pool for the specified database ID, panicking if the pool is not initialized. #[cfg(not(target_arch = "wasm32"))] pub fn sqlite_connection(&self, db_id: Option<&str>) -> Arc> { - let pool = self.sqlite_conn_pool(); + let pool = self + .sqlite_conn_pool + .or(&|| panic!("sqlite_connection is not initialized")) + .clone(); pool.sqlite_conn(self, db_id) } } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 63a5273fa3..9a2a8fcbbf 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -7,23 +7,28 @@ use std::sync::{Arc, Mutex}; pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; const SYNC_SQLITE_DB_ID: &str = "MM2.db"; +/// Represents the kind of database connection ID: either shared or single-user. enum DbIdConnKind { Shared, Single, } +/// A pool for managing SQLite connections, where each connection is keyed by a unique string identifier. #[derive(Clone)] pub struct SqliteConnPool(Arc>>>>); impl SqliteConnPool { + /// Initializes a single-user database connection. 
pub fn init(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { Self::init_impl(ctx, db_id, DbIdConnKind::Single) } + /// Initializes a shared database connection. pub fn init_shared(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { Self::init_impl(ctx, db_id, DbIdConnKind::Shared) } + /// Internal implementation to initialize a database connection. fn init_impl(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); @@ -45,10 +50,13 @@ impl SqliteConnPool { Ok(()) } + /// Test method for initializing a single-user database connection in-memory. pub fn init_test(ctx: &MmCtx) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Single) } + /// Test method for initializing a shared database connection in-memory. pub fn init_shared_test(ctx: &MmCtx) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Shared) } + /// Internal test implementation to initialize a database connection in-memory. fn init_impl_test(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); @@ -67,22 +75,27 @@ impl SqliteConnPool { Ok(()) } + /// Retrieves a single-user connection from the pool. pub fn sqlite_conn(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { Self::sqlite_conn_impl(ctx, db_id, DbIdConnKind::Single) } + /// Retrieves a shared connection from the pool. pub fn sqlite_conn_shared(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { Self::sqlite_conn_impl(ctx, db_id, DbIdConnKind::Shared) } + /// Optionally retrieves a single-user connection from the pool if available. pub fn sqlite_conn_opt(&self, ctx: &MmCtx, db_id: Option<&str>) -> Option>> { Self::sqlite_conn_opt_impl(ctx, db_id, DbIdConnKind::Single) } + /// Optionally retrieves a shared connection from the pool if available. pub fn shared_sqlite_conn_opt(&self, ctx: &MmCtx, db_id: Option<&str>) -> Option>> { Self::sqlite_conn_opt_impl(ctx, db_id, DbIdConnKind::Shared) } + /// Internal implementation to retrieve or create a connection optionally. fn sqlite_conn_opt_impl( ctx: &MmCtx, db_id: Option<&str>, @@ -100,10 +113,10 @@ impl SqliteConnPool { Some(conn) }; }; - None } + /// Internal implementation to retrieve or create a connection. fn sqlite_conn_impl(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Arc> { let mut connections = ctx .sqlite_conn_pool @@ -123,6 +136,7 @@ impl SqliteConnPool { }; } + /// Generates a database ID based on the connection kind and optional database ID. fn db_id(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: &DbIdConnKind) -> String { let db_id_default = match db_id_conn_kind { DbIdConnKind::Shared => hex::encode(ctx.shared_db_id().as_slice()), @@ -132,6 +146,7 @@ impl SqliteConnPool { db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default) } + /// Opens a database connection based on the database ID and connection kind. 
fn open_connection(ctx: &MmCtx, db_id: &str, db_id_conn_kind: &DbIdConnKind) -> Arc> { let sqlite_file_path = match db_id_conn_kind { DbIdConnKind::Shared => ctx.shared_dbdir(Some(db_id)).join("MM2-shared.db"), From a6d0b51f95ed14193e225d90344af10f8f129f83 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 6 May 2024 18:04:24 +0100 Subject: [PATCH 095/186] add hd wallet todos --- mm2src/coins/eth/v2_activation.rs | 14 +++++----- .../coins/hd_wallet/storage/mock_storage.rs | 2 +- mm2src/coins/hd_wallet/storage/mod.rs | 26 ++++++++++--------- .../coins/hd_wallet/storage/sqlite_storage.rs | 7 +++-- .../coins/hd_wallet/storage/wasm_storage.rs | 3 ++- .../utxo/utxo_builder/utxo_coin_builder.rs | 6 +++-- 6 files changed, 32 insertions(+), 26 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 5a79e19f9e..4d99dfebf2 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -649,9 +649,10 @@ pub(crate) async fn build_address_and_priv_key_policy( let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); let hd_wallet_rmd160 = *ctx.rmd160(); - let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) - .await - .mm_err(EthActivationV2Error::from)?; + let hd_wallet_storage = + HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160, None) + .await + .mm_err(EthActivationV2Error::from)?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, &path_to_coin).await?; let gap_limit = gap_limit.unwrap_or(DEFAULT_GAP_LIMIT); let hd_wallet = EthHDWallet { @@ -685,9 +686,10 @@ pub(crate) async fn build_address_and_priv_key_policy( .hw_ctx() .or_mm_err(|| EthActivationV2Error::HwContextNotInitialized)?; let hd_wallet_rmd160 = hw_ctx.rmd160(); - let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) - .await - .mm_err(EthActivationV2Error::from)?; + let hd_wallet_storage = + HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160, None) + .await + .mm_err(EthActivationV2Error::from)?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, &path_to_coin).await?; let gap_limit = gap_limit.unwrap_or(DEFAULT_GAP_LIMIT); let hd_wallet = EthHDWallet { diff --git a/mm2src/coins/hd_wallet/storage/mock_storage.rs b/mm2src/coins/hd_wallet/storage/mock_storage.rs index 8086e58be8..eaf686fc90 100644 --- a/mm2src/coins/hd_wallet/storage/mock_storage.rs +++ b/mm2src/coins/hd_wallet/storage/mock_storage.rs @@ -8,7 +8,7 @@ pub(crate) struct HDWalletMockStorage; #[async_trait] #[cfg_attr(test, mockable)] impl HDWalletStorageInternalOps for HDWalletMockStorage { - async fn init(_ctx: &MmArc) -> HDWalletStorageResult + async fn init(_ctx: &MmArc, _db_id: Option<&str>) -> HDWalletStorageResult where Self: Sized, { diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index ced30d5bb8..a38551e687 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -14,6 +14,7 @@ use std::ops::Deref; #[cfg(target_arch = "wasm32")] mod wasm_storage; #[cfg(any(test, target_arch = "wasm32"))] mod mock_storage; + #[cfg(any(test, target_arch = "wasm32"))] pub(crate) use mock_storage::HDWalletMockStorage; @@ -84,7 +85,7 @@ pub struct HDAccountStorageItem { #[async_trait] #[cfg_attr(test, mockable)] pub(crate) trait HDWalletStorageInternalOps { - async fn init(ctx: &MmArc) -> HDWalletStorageResult + async fn init(ctx: 
&MmArc, db_id: Option<&str>) -> HDWalletStorageResult where Self: Sized; @@ -219,8 +220,8 @@ impl Default for HDWalletCoinStorage { } impl HDWalletCoinStorage { - pub async fn init(ctx: &MmArc, coin: String) -> HDWalletStorageResult { - let inner = Box::new(HDWalletStorageInstance::init(ctx).await?); + pub async fn init(ctx: &MmArc, coin: String, db_id: Option<&str>) -> HDWalletStorageResult { + let inner = Box::new(HDWalletStorageInstance::init(ctx, db_id).await?); let crypto_ctx = CryptoCtx::from_ctx(ctx)?; let hd_wallet_rmd160 = crypto_ctx .hw_wallet_rmd160() @@ -236,8 +237,9 @@ impl HDWalletCoinStorage { ctx: &MmArc, coin: String, hd_wallet_rmd160: H160, + db_id: Option<&str>, ) -> HDWalletStorageResult { - let inner = Box::new(HDWalletStorageInstance::init(ctx).await?); + let inner = Box::new(HDWalletStorageInstance::init(ctx, db_id).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -341,13 +343,13 @@ mod tests { let device0_rmd160 = H160::from("0000000000000000000000000000000000000020"); let device1_rmd160 = H160::from("0000000000000000000000000000000000000030"); - let rick_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) + let rick_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160, None) .await .expect("!HDWalletCoinStorage::new"); - let rick_device1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160) + let rick_device1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160, None) .await .expect("!HDWalletCoinStorage::new"); - let morty_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "MORTY".to_owned(), device0_rmd160) + let morty_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "MORTY".to_owned(), device0_rmd160, None) .await .expect("!HDWalletCoinStorage::new"); @@ -379,7 +381,7 @@ mod tests { rick_device0_account0.clone(), rick_device0_account1.clone(), rick_device1_account0.clone(), - morty_device0_account0.clone() + morty_device0_account0.clone(), ]); let mut actual = rick_device0_db @@ -433,13 +435,13 @@ mod tests { let device1_rmd160 = H160::from("0000000000000000000000000000000000000020"); let device2_rmd160 = H160::from("0000000000000000000000000000000000000030"); - let wallet0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) + let wallet0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160, None) .await .expect("!HDWalletCoinStorage::new"); - let wallet1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160) + let wallet1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160, None) .await .expect("!HDWalletCoinStorage::new"); - let wallet2_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device2_rmd160) + let wallet2_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device2_rmd160, None) .await .expect("!HDWalletCoinStorage::new"); @@ -492,7 +494,7 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device_rmd160 = H160::from("0000000000000000000000000000000000000010"); - let db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device_rmd160) + let db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device_rmd160, None) .await .expect("!HDWalletCoinStorage::new"); diff --git a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs index a469ab5f18..911b0f9518 100644 --- 
a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs +++ b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs @@ -97,11 +97,11 @@ pub(super) struct HDWalletSqliteStorage { #[async_trait] impl HDWalletStorageInternalOps for HDWalletSqliteStorage { - async fn init(ctx: &MmArc) -> HDWalletStorageResult + async fn init(ctx: &MmArc, db_id: Option<&str>) -> HDWalletStorageResult where Self: Sized, { - let shared = ctx.shared_sqlite_conn_opt(None).or_mm_err(|| { + let shared = ctx.shared_sqlite_conn_opt(db_id).or_mm_err(|| { HDWalletStorageError::Internal("'MmCtx::shared_sqlite_conn' is not initialized".to_owned()) })?; let storage = HDWalletSqliteStorage { @@ -279,8 +279,7 @@ pub(crate) async fn get_all_storage_items(ctx: &MmArc) -> Vec HDWalletStorageResult + async fn init(ctx: &MmArc, _db_id: Option<&str>) -> HDWalletStorageResult where Self: Sized, { @@ -271,6 +271,7 @@ impl HDWalletIndexedDbStorage { } async fn lock_db_mutex(db: &SharedDb) -> HDWalletStorageResult> { + // TODO: db_id db.get_or_initialize(None).await.mm_err(HDWalletStorageError::from) } diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 8b523b630f..ae8fde91d9 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -221,8 +221,9 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { let address_format = self.address_format()?; let hd_wallet_rmd160 = *self.ctx().rmd160(); + // TODO shared_db_id let hd_wallet_storage = - HDWalletCoinStorage::init_with_rmd160(self.ctx(), self.ticker().to_owned(), hd_wallet_rmd160).await?; + HDWalletCoinStorage::init_with_rmd160(self.ctx(), self.ticker().to_owned(), hd_wallet_rmd160, None).await?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, path_to_coin) .await .mm_err(UtxoCoinBuildError::from)?; @@ -357,7 +358,8 @@ pub trait UtxoFieldsWithHardwareWalletBuilder: UtxoCoinBuilderCommonOps { .clone() .or_mm_err(|| UtxoConfError::DerivationPathIsNotSet)?; - let hd_wallet_storage = HDWalletCoinStorage::init(self.ctx(), ticker).await?; + // TODO shared_d_id + let hd_wallet_storage = HDWalletCoinStorage::init(self.ctx(), ticker, None).await?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, &path_to_coin) .await From 35b102a961515b4d5dd4e6316459a94b4a46a474 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 6 May 2024 18:26:29 +0100 Subject: [PATCH 096/186] add todos for AccountContext --- mm2src/mm2_gui_storage/src/context.rs | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/mm2src/mm2_gui_storage/src/context.rs b/mm2src/mm2_gui_storage/src/context.rs index 926d94cbdf..fb6203e3c3 100644 --- a/mm2src/mm2_gui_storage/src/context.rs +++ b/mm2src/mm2_gui_storage/src/context.rs @@ -2,6 +2,7 @@ use crate::account::storage::{AccountStorage, AccountStorageBoxed, AccountStorag use mm2_core::mm_ctx::{from_ctx, MmArc}; use std::sync::Arc; +#[allow(unused)] pub(crate) struct AccountContext { storage: AccountStorageBoxed, db_id: Option, @@ -9,30 +10,14 @@ pub(crate) struct AccountContext { impl AccountContext { /// Obtains a reference to this crate context, creating it if necessary. 
+ /// TODO: this is only created/initialized once; need to find a way to manage multiple account contexts pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { - let account_context = from_ctx(&ctx.account_ctx, move || { + from_ctx(&ctx.account_ctx, move || { Ok(AccountContext { - storage: AccountStorageBuilder::new(ctx, db_id) - .build() - .map_err(|e| e.to_string())?, + storage: AccountStorageBuilder::new(ctx, db_id).build().map_err(|e| e.to_string())?, db_id: db_id.map(|e| e.to_string()), }) - })?; - - if account_context.db_id.as_deref() != db_id { - let mut ctx_field = ctx.account_ctx.lock().unwrap(); - let account_context = Arc::new(AccountContext { - storage: AccountStorageBuilder::new(ctx, db_id) - .build() - .map_err(|e| e.to_string())?, - db_id: db_id.map(|e| e.to_string()), - }); - *ctx_field = Some(Arc::clone(&account_context) as Arc); - - return Ok(account_context); - }; - - Ok(account_context) + }) } /// Initializes the storage and returns a reference to it. From 474d6415588f9da32df64ed04ad05a3be036f87d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 6 May 2024 19:48:15 +0100 Subject: [PATCH 097/186] impl AsyncSqliteConnPool --- mm2src/coins/nft/nft_structs.rs | 33 +- mm2src/coins/nft/storage/sql_storage.rs | 1012 ++++++++--------- mm2src/db_common/src/lib.rs | 16 - mm2src/mm2_core/src/mm_ctx.rs | 26 +- mm2src/mm2_core/src/sql_connection_pool.rs | 115 ++ mm2src/mm2_gui_storage/src/context.rs | 4 +- mm2src/mm2_main/src/lp_native_dex.rs | 31 +- .../src/rpc/lp_commands/lp_commands_legacy.rs | 8 +- mm2src/mm2_test_helpers/src/for_tests.rs | 14 +- 9 files changed, 635 insertions(+), 624 deletions(-) diff --git a/mm2src/coins/nft/nft_structs.rs b/mm2src/coins/nft/nft_structs.rs index e0ff2881eb..a0e798c4db 100644 --- a/mm2src/coins/nft/nft_structs.rs +++ b/mm2src/coins/nft/nft_structs.rs @@ -2,6 +2,8 @@ use common::ten; use enum_derives::EnumVariantList; use ethereum_types::Address; use mm2_core::mm_ctx::{from_ctx, MmArc}; +#[cfg(not(target_arch = "wasm32"))] +use mm2_core::sql_connection_pool::AsyncSqliteConnPool; use mm2_err_handle::prelude::*; use mm2_number::{BigDecimal, BigUint}; use rpc::v1::types::Bytes as BytesJson; @@ -21,13 +23,6 @@ use crate::nft::nft_errors::{LockDBError, ParseChainTypeError, ParseContractType use crate::nft::storage::{NftListStorageOps, NftTransferHistoryStorageOps}; use crate::{TransactionType, TxFeeDetails, WithdrawFee}; -cfg_native! { - use db_common::async_sql_conn::AsyncConnection; - use db_common::AsyncConnectionCtx; - use futures::lock::Mutex as AsyncMutex; - use mm2_core::mm_ctx::log_sqlite_file_open_attempt; -} - cfg_wasm32!
{ use mm2_db::indexed_db::{ConstructibleDb, SharedDb}; use crate::nft::storage::wasm::WasmNftCacheError; @@ -731,7 +726,7 @@ pub(crate) struct NftCtx { #[cfg(target_arch = "wasm32")] pub(crate) nft_cache_db: SharedDb, #[cfg(not(target_arch = "wasm32"))] - pub(crate) nft_cache_dbs: Arc>, + pub(crate) nft_cache_dbs: AsyncSqliteConnPool, #[cfg(not(target_arch = "wasm32"))] ctx: MmArc, } @@ -744,7 +739,7 @@ impl NftCtx { pub(crate) fn from_ctx(ctx: &MmArc) -> Result, String> { Ok(try_s!(from_ctx(&ctx.nft_ctx, move || { let async_sqlite_connection = ctx - .async_sqlite_connection + .async_sqlite_conn_pool .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { nft_cache_dbs: async_sqlite_connection.clone(), @@ -768,24 +763,8 @@ impl NftCtx { &self, db_id: Option<&str>, ) -> MmResult { - let db_id = db_id.map(|d| d.to_string()).unwrap_or_else(|| self.ctx.rmd160_hex()); - let mut connection = self.nft_cache_dbs.lock().await; - - // check if existing connection db_id is same as requested db and return the connection. - if db_id == connection.db_id { - return Ok(connection); - } - - // else create and return new connection. - let sqlite_file_path = self.ctx.dbdir(Some(&db_id)).join("KOMODEFI.db"); - log_sqlite_file_open_attempt(&sqlite_file_path); - let async_conn = AsyncConnection::open(sqlite_file_path) - .await - .map_to_mm(|e| LockDBError::InternalError(e.to_string()))?; - connection.connection = async_conn; - connection.db_id = db_id; - - Ok(connection) + let locked = self.nft_cache_dbs.async_sqlite_conn(&self.ctx, db_id).await; + Ok(locked.lock_owned().await) } #[cfg(target_arch = "wasm32")] diff --git a/mm2src/coins/nft/storage/sql_storage.rs b/mm2src/coins/nft/storage/sql_storage.rs index fcff626a00..a59c07a6c3 100644 --- a/mm2src/coins/nft/storage/sql_storage.rs +++ b/mm2src/coins/nft/storage/sql_storage.rs @@ -5,15 +5,14 @@ use crate::nft::nft_structs::{Chain, ContractType, ConvertChain, Nft, NftCommon, use crate::nft::storage::{get_offset_limit, NftDetailsJson, NftListStorageOps, NftStorageError, NftTransferHistoryStorageOps, RemoveNftResult, TransferDetailsJson}; use async_trait::async_trait; -use db_common::async_sql_conn::AsyncConnError; +use db_common::async_sql_conn::{AsyncConnError, AsyncConnection}; use db_common::sql_build::{SqlCondition, SqlQuery}; use db_common::sqlite::rusqlite::types::{FromSqlError, Type}; use db_common::sqlite::rusqlite::{Connection, Error as SqlError, Result as SqlResult, Row, Statement}; use db_common::sqlite::sql_builder::SqlBuilder; use db_common::sqlite::{query_single_row, string_from_row, SafeTableName, CHECK_TABLE_EXISTS_SQL}; -use db_common::AsyncConnectionCtx; use ethereum_types::Address; -use futures_util::lock::MutexGuard; +use futures_util::lock::OwnedMutexGuard; use mm2_err_handle::prelude::*; use mm2_number::{BigDecimal, BigUint}; use serde_json::Value as Json; @@ -548,37 +547,35 @@ fn is_table_empty(conn: &Connection, safe_table_name: SafeTableName) -> Result { +impl NftListStorageOps for OwnedMutexGuard { type Error = AsyncConnError; async fn init(&self, chain: &Chain) -> MmResult<(), Self::Error> { let sql_nft_list = create_nft_list_table_sql(chain)?; - self.connection - .call(move |conn| { - conn.execute(&sql_nft_list, []).map(|_| ())?; - conn.execute(&create_scanned_nft_blocks_sql()?, []).map(|_| ())?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + conn.execute(&sql_nft_list, []).map(|_| ())?; + conn.execute(&create_scanned_nft_blocks_sql()?, []).map(|_| ())?; + Ok(()) + }) + 
.await + .map_to_mm(AsyncConnError::from) } async fn is_initialized(&self, chain: &Chain) -> MmResult { let table_name = chain.nft_list_table_name()?; - self.connection - .call(move |conn| { - let nft_list_initialized = - query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; - let scanned_nft_blocks_initialized = query_single_row( - conn, - CHECK_TABLE_EXISTS_SQL, - [scanned_nft_blocks_table_name()?.inner()], - string_from_row, - )?; - Ok(nft_list_initialized.is_some() && scanned_nft_blocks_initialized.is_some()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let nft_list_initialized = + query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; + let scanned_nft_blocks_initialized = query_single_row( + conn, + CHECK_TABLE_EXISTS_SQL, + [scanned_nft_blocks_table_name()?.inner()], + string_from_row, + )?; + Ok(nft_list_initialized.is_some() && scanned_nft_blocks_initialized.is_some()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nft_list( @@ -589,34 +586,33 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { page_number: Option, filters: Option, ) -> MmResult { - self.connection - .call(move |conn| { - let sql_builder = get_nft_list_builder_preimage(chains, filters)?; - let total_count_builder_sql = sql_builder - .clone() - .count("*") - .sql() - .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; - let total: isize = conn - .prepare(&total_count_builder_sql)? - .query_row([], |row| row.get(0))?; - let count_total = total.try_into().expect("count should not be failed"); - - let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); - let sql = finalize_sql_builder(sql_builder, offset, limit)?; - let nfts = conn - .prepare(&sql)? - .query_map([], nft_from_row)? - .collect::, _>>()?; - let result = NftList { - nfts, - skipped: offset, - total: count_total, - }; - Ok(result) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_builder = get_nft_list_builder_preimage(chains, filters)?; + let total_count_builder_sql = sql_builder + .clone() + .count("*") + .sql() + .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; + let total: isize = conn + .prepare(&total_count_builder_sql)? + .query_row([], |row| row.get(0))?; + let count_total = total.try_into().expect("count should not be failed"); + + let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); + let sql = finalize_sql_builder(sql_builder, offset, limit)?; + let nfts = conn + .prepare(&sql)? + .query_map([], nft_from_row)? 
+ .collect::, _>>()?; + let result = NftList { + nfts, + skipped: offset, + total: count_total, + }; + Ok(result) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn add_nfts_to_list(&self, chain: Chain, nfts: I, last_scanned_block: u64) -> MmResult<(), Self::Error> @@ -624,56 +620,55 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { I: IntoIterator + Send + 'static, I::IntoIter: Send, { - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - - for nft in nfts { - let details_json = NftDetailsJson { - owner_of: nft.common.owner_of, - token_hash: nft.common.token_hash, - minter_address: nft.common.minter_address, - block_number_minted: nft.block_number_minted, - }; - let details_json = json::to_string(&details_json).expect("serialization should not fail"); - let params = [ - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - Some(nft.chain.to_string()), - Some(nft.common.amount.to_string()), - Some(nft.block_number.to_string()), - Some(nft.contract_type.to_string()), - Some(i32::from(nft.common.possible_spam).to_string()), - Some(i32::from(nft.possible_phishing).to_string()), - nft.common.collection_name, - nft.common.symbol, - nft.common.token_uri, - nft.common.token_domain, - nft.common.metadata, - nft.common.last_token_uri_sync, - nft.common.last_metadata_sync, - nft.uri_meta.raw_image_url, - nft.uri_meta.image_url, - nft.uri_meta.image_domain, - nft.uri_meta.token_name, - nft.uri_meta.description, - nft.uri_meta.attributes.map(|v| v.to_string()), - nft.uri_meta.animation_url, - nft.uri_meta.animation_domain, - nft.uri_meta.external_url, - nft.uri_meta.external_domain, - nft.uri_meta.image_details.map(|v| v.to_string()), - Some(details_json), - ]; - sql_transaction.execute(&insert_nft_in_list_sql(&chain)?, params)?; - } - let scanned_block_params = [chain.to_ticker().to_string(), last_scanned_block.to_string()]; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + + for nft in nfts { + let details_json = NftDetailsJson { + owner_of: nft.common.owner_of, + token_hash: nft.common.token_hash, + minter_address: nft.common.minter_address, + block_number_minted: nft.block_number_minted, + }; + let details_json = json::to_string(&details_json).expect("serialization should not fail"); + let params = [ + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + Some(nft.chain.to_string()), + Some(nft.common.amount.to_string()), + Some(nft.block_number.to_string()), + Some(nft.contract_type.to_string()), + Some(i32::from(nft.common.possible_spam).to_string()), + Some(i32::from(nft.possible_phishing).to_string()), + nft.common.collection_name, + nft.common.symbol, + nft.common.token_uri, + nft.common.token_domain, + nft.common.metadata, + nft.common.last_token_uri_sync, + nft.common.last_metadata_sync, + nft.uri_meta.raw_image_url, + nft.uri_meta.image_url, + nft.uri_meta.image_domain, + nft.uri_meta.token_name, + nft.uri_meta.description, + nft.uri_meta.attributes.map(|v| v.to_string()), + nft.uri_meta.animation_url, + nft.uri_meta.animation_domain, + nft.uri_meta.external_url, + nft.uri_meta.external_domain, + nft.uri_meta.image_details.map(|v| v.to_string()), + Some(details_json), + ]; + sql_transaction.execute(&insert_nft_in_list_sql(&chain)?, params)?; + } + let scanned_block_params = 
[chain.to_ticker().to_string(), last_scanned_block.to_string()]; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nft( @@ -683,18 +678,17 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { token_id: BigUint, ) -> MmResult, Self::Error> { let table_name = chain.nft_list_table_name()?; - self.connection - .call(move |conn| { - let sql = format!( - "SELECT * FROM {} WHERE token_address=?1 AND token_id=?2", - table_name.inner() - ); - let params = [token_address, token_id.to_string()]; - let nft = query_single_row(conn, &sql, params, nft_from_row)?; - Ok(nft) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql = format!( + "SELECT * FROM {} WHERE token_address=?1 AND token_id=?2", + table_name.inner() + ); + let params = [token_address, token_id.to_string()]; + let nft = query_single_row(conn, &sql, params, nft_from_row)?; + Ok(nft) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn remove_nft_from_list( @@ -708,22 +702,21 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { let sql = delete_nft_sql(table_name)?; let params = [token_address, token_id.to_string()]; let scanned_block_params = [chain.to_ticker().to_string(), scanned_block.to_string()]; - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let rows_num = sql_transaction.execute(&sql, params)?; - - let remove_nft_result = if rows_num > 0 { - RemoveNftResult::NftRemoved - } else { - RemoveNftResult::NftDidNotExist - }; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(remove_nft_result) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let rows_num = sql_transaction.execute(&sql, params)?; + + let remove_nft_result = if rows_num > 0 { + RemoveNftResult::NftRemoved + } else { + RemoveNftResult::NftDidNotExist + }; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + sql_transaction.commit()?; + Ok(remove_nft_result) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nft_amount( @@ -738,78 +731,74 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { table_name.inner() ); let params = [token_address, token_id.to_string()]; - self.connection - .call(move |conn| { - let amount = query_single_row(conn, &sql, params, nft_amount_from_row)?; - Ok(amount) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let amount = query_single_row(conn, &sql, params, nft_amount_from_row)?; + Ok(amount) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn refresh_nft_metadata(&self, chain: &Chain, nft: Nft) -> MmResult<(), Self::Error> { let sql = refresh_nft_metadata_sql(chain)?; - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [ - Some(i32::from(nft.common.possible_spam).to_string()), - Some(i32::from(nft.possible_phishing).to_string()), - nft.common.collection_name, - nft.common.symbol, - nft.common.token_uri, - nft.common.token_domain, - nft.common.metadata, - nft.common.last_token_uri_sync, - nft.common.last_metadata_sync, - nft.uri_meta.raw_image_url, - nft.uri_meta.image_url, - nft.uri_meta.image_domain, - nft.uri_meta.token_name, - nft.uri_meta.description, - nft.uri_meta.attributes.map(|v| v.to_string()), - 
nft.uri_meta.animation_url, - nft.uri_meta.animation_domain, - nft.uri_meta.external_url, - nft.uri_meta.external_domain, - nft.uri_meta.image_details.map(|v| v.to_string()), - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - ]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [ + Some(i32::from(nft.common.possible_spam).to_string()), + Some(i32::from(nft.possible_phishing).to_string()), + nft.common.collection_name, + nft.common.symbol, + nft.common.token_uri, + nft.common.token_domain, + nft.common.metadata, + nft.common.last_token_uri_sync, + nft.common.last_metadata_sync, + nft.uri_meta.raw_image_url, + nft.uri_meta.image_url, + nft.uri_meta.image_domain, + nft.uri_meta.token_name, + nft.uri_meta.description, + nft.uri_meta.attributes.map(|v| v.to_string()), + nft.uri_meta.animation_url, + nft.uri_meta.animation_domain, + nft.uri_meta.external_url, + nft.uri_meta.external_domain, + nft.uri_meta.image_details.map(|v| v.to_string()), + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + ]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_last_block_number(&self, chain: &Chain) -> MmResult, Self::Error> { let table_name = chain.nft_list_table_name()?; let sql = select_last_block_number_sql(table_name)?; - self.connection - .call(move |conn| { - let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; - Ok(block_number) - }) - .await? - .map(|b| b.try_into()) - .transpose() - .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) + self.call(move |conn| { + let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; + Ok(block_number) + }) + .await? + .map(|b| b.try_into()) + .transpose() + .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) } async fn get_last_scanned_block(&self, chain: &Chain) -> MmResult, Self::Error> { let sql = select_last_scanned_block_sql()?; let params = [chain.to_ticker()]; - self.connection - .call(move |conn| { - let block_number = query_single_row(conn, &sql, params, block_number_from_row)?; - Ok(block_number) - }) - .await? - .map(|b| b.try_into()) - .transpose() - .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) + self.call(move |conn| { + let block_number = query_single_row(conn, &sql, params, block_number_from_row)?; + Ok(block_number) + }) + .await? 
+ .map(|b| b.try_into()) + .transpose() + .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) } async fn update_nft_amount(&self, chain: &Chain, nft: Nft, scanned_block: u64) -> MmResult<(), Self::Error> { @@ -819,21 +808,20 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { table_name.inner() ); let scanned_block_params = [chain.to_ticker().to_string(), scanned_block.to_string()]; - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [ - Some(nft.common.amount.to_string()), - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - ]; - sql_transaction.execute(&sql, params)?; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [ + Some(nft.common.amount.to_string()), + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + ]; + sql_transaction.execute(&sql, params)?; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_nft_amount_and_block_number(&self, chain: &Chain, nft: Nft) -> MmResult<(), Self::Error> { @@ -843,36 +831,34 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { table_name.inner() ); let scanned_block_params = [chain.to_ticker().to_string(), nft.block_number.to_string()]; - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [ - Some(nft.common.amount.to_string()), - Some(nft.block_number.to_string()), - Some(eth_addr_to_hex(&nft.common.token_address)), - Some(nft.token_id.to_string()), - ]; - sql_transaction.execute(&sql, params)?; - sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [ + Some(nft.common.amount.to_string()), + Some(nft.block_number.to_string()), + Some(eth_addr_to_hex(&nft.common.token_address)), + Some(nft.token_id.to_string()), + ]; + sql_transaction.execute(&sql, params)?; + sql_transaction.execute(&upsert_last_scanned_block_sql()?, scanned_block_params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_nfts_by_token_address(&self, chain: Chain, token_address: String) -> MmResult, Self::Error> { - self.connection - .call(move |conn| { - let table_name = chain.nft_list_table_name()?; - let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; - let nfts = stmt - .query_map([token_address], nft_from_row)? - .collect::, _>>()?; - Ok(nfts) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let table_name = chain.nft_list_table_name()?; + let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; + let nfts = stmt + .query_map([token_address], nft_from_row)? 
+ .collect::, _>>()?; + Ok(nfts) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_nft_spam_by_token_address( @@ -886,36 +872,34 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { "UPDATE {} SET possible_spam = ?1 WHERE token_address = ?2;", table_name.inner() ); - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_animation_external_domains(&self, chain: &Chain) -> MmResult, Self::Error> { let safe_table_name = chain.nft_list_table_name()?; - self.connection - .call(move |conn| { - let table_name = safe_table_name.inner(); - let sql_query = format!( - "SELECT DISTINCT animation_domain FROM {} UNION SELECT DISTINCT external_domain FROM {}", - table_name, table_name - ); - let mut stmt = conn.prepare(&sql_query)?; - let domains = stmt - .query_map([], |row| row.get::<_, Option>(0))? - .collect::, _>>()?; - let domains = domains.into_iter().flatten().collect(); - Ok(domains) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let table_name = safe_table_name.inner(); + let sql_query = format!( + "SELECT DISTINCT animation_domain FROM {} UNION SELECT DISTINCT external_domain FROM {}", + table_name, table_name + ); + let mut stmt = conn.prepare(&sql_query)?; + let domains = stmt + .query_map([], |row| row.get::<_, Option>(0))? + .collect::, _>>()?; + let domains = domains.into_iter().flatten().collect(); + Ok(domains) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_nft_phishing_by_domain( @@ -930,16 +914,15 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { OR image_domain = ?2 OR animation_domain = ?2 OR external_domain = ?2;", table_name.inner() ); - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_nft_data(&self, chain: &Chain) -> MmResult<(), Self::Error> { @@ -948,65 +931,61 @@ impl NftListStorageOps for MutexGuard<'_, AsyncConnectionCtx> { let table_scanned_blocks = scanned_nft_blocks_table_name()?; let sql_scanned_block = format!("DELETE from {} where chain=?1", table_scanned_blocks.inner()); let scanned_block_param = [chain.to_ticker()]; - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - sql_transaction.execute(&sql_nft, [])?; - sql_transaction.execute(&sql_scanned_block, scanned_block_param)?; - sql_transaction.commit()?; - if is_table_empty(conn, table_scanned_blocks.clone())? 
{ - conn.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), []) - .map(|_| ())?; - } - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&sql_nft, [])?; + sql_transaction.execute(&sql_scanned_block, scanned_block_param)?; + sql_transaction.commit()?; + if is_table_empty(conn, table_scanned_blocks.clone())? { + conn.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), []) + .map(|_| ())?; + } + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_all_nft_data(&self) -> MmResult<(), Self::Error> { - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - for chain in Chain::variant_list().into_iter() { - let table_name = chain.nft_list_table_name()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; - } - let table_scanned_blocks = scanned_nft_blocks_table_name()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), [])?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + for chain in Chain::variant_list().into_iter() { + let table_name = chain.nft_list_table_name()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; + } + let table_scanned_blocks = scanned_nft_blocks_table_name()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_scanned_blocks.inner()), [])?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } } #[async_trait] -impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { +impl NftTransferHistoryStorageOps for OwnedMutexGuard { type Error = AsyncConnError; async fn init(&self, chain: &Chain) -> MmResult<(), Self::Error> { let sql_transfer_history = create_transfer_history_table_sql(chain)?; - self.connection - .call(move |conn| { - conn.execute(&sql_transfer_history, []).map(|_| ())?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + conn.execute(&sql_transfer_history, []).map(|_| ())?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn is_initialized(&self, chain: &Chain) -> MmResult { let table_name = chain.transfer_history_table_name()?; - self.connection - .call(move |conn| { - let nft_list_initialized = - query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; - Ok(nft_list_initialized.is_some()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let nft_list_initialized = + query_single_row(conn, CHECK_TABLE_EXISTS_SQL, [table_name.inner()], string_from_row)?; + Ok(nft_list_initialized.is_some()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfer_history( @@ -1017,34 +996,33 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { page_number: Option, filters: Option, ) -> MmResult { - self.connection - .call(move |conn| { - let sql_builder = get_nft_transfer_builder_preimage(chains, filters)?; - let total_count_builder_sql = sql_builder - .clone() - .count("*") - .sql() - .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; - let total: isize = conn - .prepare(&total_count_builder_sql)? 
- .query_row([], |row| row.get(0))?; - let count_total = total.try_into().expect("count should not be failed"); - - let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); - let sql = finalize_sql_builder(sql_builder, offset, limit)?; - let transfers = conn - .prepare(&sql)? - .query_map([], transfer_history_from_row)? - .collect::, _>>()?; - let result = NftsTransferHistoryList { - transfer_history: transfers, - skipped: offset, - total: count_total, - }; - Ok(result) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_builder = get_nft_transfer_builder_preimage(chains, filters)?; + let total_count_builder_sql = sql_builder + .clone() + .count("*") + .sql() + .map_err(|e| SqlError::ToSqlConversionFailure(e.into()))?; + let total: isize = conn + .prepare(&total_count_builder_sql)? + .query_row([], |row| row.get(0))?; + let count_total = total.try_into().expect("count should not be failed"); + + let (offset, limit) = get_offset_limit(max, limit, page_number, count_total); + let sql = finalize_sql_builder(sql_builder, offset, limit)?; + let transfers = conn + .prepare(&sql)? + .query_map([], transfer_history_from_row)? + .collect::, _>>()?; + let result = NftsTransferHistoryList { + transfer_history: transfers, + skipped: offset, + total: count_total, + }; + Ok(result) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn add_transfers_to_history(&self, chain: Chain, transfers: I) -> MmResult<(), Self::Error> @@ -1052,64 +1030,62 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { I: IntoIterator + Send + 'static, I::IntoIter: Send, { - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - for transfer in transfers { - let details_json = TransferDetailsJson { - block_hash: transfer.common.block_hash, - transaction_index: transfer.common.transaction_index, - value: transfer.common.value, - transaction_type: transfer.common.transaction_type, - verified: transfer.common.verified, - operator: transfer.common.operator, - from_address: transfer.common.from_address, - to_address: transfer.common.from_address, - fee_details: transfer.fee_details, - }; - let transfer_json = json::to_string(&details_json).expect("serialization should not fail"); - let params = [ - Some(transfer.common.transaction_hash), - Some(transfer.common.log_index.to_string()), - Some(transfer.chain.to_string()), - Some(transfer.block_number.to_string()), - Some(transfer.block_timestamp.to_string()), - Some(transfer.contract_type.to_string()), - Some(eth_addr_to_hex(&transfer.common.token_address)), - Some(transfer.token_id.to_string()), - Some(transfer.status.to_string()), - Some(transfer.common.amount.to_string()), - transfer.token_uri, - transfer.token_domain, - transfer.collection_name, - transfer.image_url, - transfer.image_domain, - transfer.token_name, - Some(i32::from(transfer.common.possible_spam).to_string()), - Some(i32::from(transfer.possible_phishing).to_string()), - Some(transfer_json), - ]; - sql_transaction.execute(&insert_transfer_in_history_sql(&chain)?, params)?; - } - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + for transfer in transfers { + let details_json = TransferDetailsJson { + block_hash: transfer.common.block_hash, + transaction_index: transfer.common.transaction_index, + value: transfer.common.value, + transaction_type: transfer.common.transaction_type, + verified: 
transfer.common.verified, + operator: transfer.common.operator, + from_address: transfer.common.from_address, + to_address: transfer.common.from_address, + fee_details: transfer.fee_details, + }; + let transfer_json = json::to_string(&details_json).expect("serialization should not fail"); + let params = [ + Some(transfer.common.transaction_hash), + Some(transfer.common.log_index.to_string()), + Some(transfer.chain.to_string()), + Some(transfer.block_number.to_string()), + Some(transfer.block_timestamp.to_string()), + Some(transfer.contract_type.to_string()), + Some(eth_addr_to_hex(&transfer.common.token_address)), + Some(transfer.token_id.to_string()), + Some(transfer.status.to_string()), + Some(transfer.common.amount.to_string()), + transfer.token_uri, + transfer.token_domain, + transfer.collection_name, + transfer.image_url, + transfer.image_domain, + transfer.token_name, + Some(i32::from(transfer.common.possible_spam).to_string()), + Some(i32::from(transfer.possible_phishing).to_string()), + Some(transfer_json), + ]; + sql_transaction.execute(&insert_transfer_in_history_sql(&chain)?, params)?; + } + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_last_block_number(&self, chain: &Chain) -> MmResult, Self::Error> { let table_name = chain.transfer_history_table_name()?; let sql = select_last_block_number_sql(table_name)?; - self.connection - .call(move |conn| { - let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; - Ok(block_number) - }) - .await? - .map(|b| b.try_into()) - .transpose() - .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) + self.call(move |conn| { + let block_number = query_single_row(conn, &sql, [], block_number_from_row)?; + Ok(block_number) + }) + .await? + .map(|b| b.try_into()) + .transpose() + .map_to_mm(|e| AsyncConnError::Rusqlite(SqlError::FromSqlConversionFailure(2, Type::Integer, Box::new(e)))) } async fn get_transfers_from_block( @@ -1117,16 +1093,15 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { chain: Chain, from_block: u64, ) -> MmResult, Self::Error> { - self.connection - .call(move |conn| { - let mut stmt = get_transfers_from_block_statement(conn, &chain)?; - let transfers = stmt - .query_map([from_block], transfer_history_from_row)? - .collect::, _>>()?; - Ok(transfers) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let mut stmt = get_transfers_from_block_statement(conn, &chain)?; + let transfers = stmt + .query_map([from_block], transfer_history_from_row)? + .collect::, _>>()?; + Ok(transfers) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfers_by_token_addr_id( @@ -1135,16 +1110,15 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { token_address: String, token_id: BigUint, ) -> MmResult, Self::Error> { - self.connection - .call(move |conn| { - let mut stmt = get_transfers_by_token_addr_id_statement(conn, chain)?; - let transfers = stmt - .query_map([token_address, token_id.to_string()], transfer_history_from_row)? - .collect::, _>>()?; - Ok(transfers) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let mut stmt = get_transfers_by_token_addr_id_statement(conn, chain)?; + let transfers = stmt + .query_map([token_address, token_id.to_string()], transfer_history_from_row)? 
+ .collect::, _>>()?; + Ok(transfers) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfer_by_tx_hash_and_log_index( @@ -1158,18 +1132,17 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { "SELECT * FROM {} WHERE transaction_hash=?1 AND log_index = ?2", table_name.inner() ); - self.connection - .call(move |conn| { - let transfer = query_single_row( - conn, - &sql, - [transaction_hash, log_index.to_string()], - transfer_history_from_row, - )?; - Ok(transfer) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let transfer = query_single_row( + conn, + &sql, + [transaction_hash, log_index.to_string()], + transfer_history_from_row, + )?; + Ok(transfer) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_transfers_meta_by_token_addr_id( @@ -1195,29 +1168,27 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { Some(transfer_meta.token_address), Some(transfer_meta.token_id.to_string()), ]; - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - sql_transaction.execute(&sql, params)?; - if set_spam { - sql_transaction.execute(&sql_spam, params_spam)?; - } - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&sql, params)?; + if set_spam { + sql_transaction.execute(&sql_spam, params_spam)?; + } + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfers_with_empty_meta(&self, chain: Chain) -> MmResult, Self::Error> { - self.connection - .call(move |conn| { - let sql_builder = get_transfers_with_empty_meta_builder(conn, &chain)?; - let token_addr_id_pair = sql_builder.query(token_address_id_from_row)?; - Ok(token_addr_id_pair) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_builder = get_transfers_with_empty_meta_builder(conn, &chain)?; + let token_addr_id_pair = sql_builder.query(token_address_id_from_row)?; + Ok(token_addr_id_pair) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_transfers_by_token_address( @@ -1225,17 +1196,16 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { chain: Chain, token_address: String, ) -> MmResult, Self::Error> { - self.connection - .call(move |conn| { - let table_name = chain.transfer_history_table_name()?; - let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; - let transfers = stmt - .query_map([token_address], transfer_history_from_row)? - .collect::, _>>()?; - Ok(transfers) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let table_name = chain.transfer_history_table_name()?; + let mut stmt = get_nfts_by_token_address_statement(conn, table_name)?; + let transfers = stmt + .query_map([token_address], transfer_history_from_row)? 
+ .collect::, _>>()?; + Ok(transfers) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_transfer_spam_by_token_address( @@ -1249,50 +1219,47 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { "UPDATE {} SET possible_spam = ?1 WHERE token_address = ?2;", table_name.inner() ); - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_spam).to_string()), Some(token_address.clone())]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_token_addresses(&self, chain: Chain) -> MmResult, Self::Error> { - self.connection - .call(move |conn| { - let table_name = chain.transfer_history_table_name()?; - let mut stmt = get_token_addresses_statement(conn, table_name)?; - let addresses = stmt - .query_map([], address_from_row)? - .collect::, _>>()?; - Ok(addresses) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let table_name = chain.transfer_history_table_name()?; + let mut stmt = get_token_addresses_statement(conn, table_name)?; + let addresses = stmt + .query_map([], address_from_row)? + .collect::, _>>()?; + Ok(addresses) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn get_domains(&self, chain: &Chain) -> MmResult, Self::Error> { let safe_table_name = chain.transfer_history_table_name()?; - self.connection - .call(move |conn| { - let table_name = safe_table_name.inner(); - let sql_query = format!( - "SELECT DISTINCT token_domain FROM {} UNION SELECT DISTINCT image_domain FROM {}", - table_name, table_name - ); - let mut stmt = conn.prepare(&sql_query)?; - let domains = stmt - .query_map([], |row| row.get::<_, Option>(0))? - .collect::, _>>()?; - let domains = domains.into_iter().flatten().collect(); - Ok(domains) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let table_name = safe_table_name.inner(); + let sql_query = format!( + "SELECT DISTINCT token_domain FROM {} UNION SELECT DISTINCT image_domain FROM {}", + table_name, table_name + ); + let mut stmt = conn.prepare(&sql_query)?; + let domains = stmt + .query_map([], |row| row.get::<_, Option>(0))? 
+ .collect::, _>>()?; + let domains = domains.into_iter().flatten().collect(); + Ok(domains) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn update_transfer_phishing_by_domain( @@ -1306,43 +1273,40 @@ impl NftTransferHistoryStorageOps for MutexGuard<'_, AsyncConnectionCtx> { "UPDATE {} SET possible_phishing = ?1 WHERE token_domain = ?2 OR image_domain = ?2;", safe_table_name.inner() ); - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; - sql_transaction.execute(&sql, params)?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + let params = [Some(i32::from(possible_phishing).to_string()), Some(domain)]; + sql_transaction.execute(&sql, params)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_history_data(&self, chain: &Chain) -> MmResult<(), Self::Error> { let table_name = chain.transfer_history_table_name()?; - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } async fn clear_all_history_data(&self) -> MmResult<(), Self::Error> { - self.connection - .call(move |conn| { - let sql_transaction = conn.transaction()?; - for chain in Chain::variant_list().into_iter() { - let table_name = chain.transfer_history_table_name()?; - sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; - } - sql_transaction.commit()?; - Ok(()) - }) - .await - .map_to_mm(AsyncConnError::from) + self.call(move |conn| { + let sql_transaction = conn.transaction()?; + for chain in Chain::variant_list().into_iter() { + let table_name = chain.transfer_history_table_name()?; + sql_transaction.execute(&format!("DROP TABLE IF EXISTS {};", table_name.inner()), [])?; + } + sql_transaction.commit()?; + Ok(()) + }) + .await + .map_to_mm(AsyncConnError::from) } } diff --git a/mm2src/db_common/src/lib.rs b/mm2src/db_common/src/lib.rs index d630d30ecd..c1806e3b97 100644 --- a/mm2src/db_common/src/lib.rs +++ b/mm2src/db_common/src/lib.rs @@ -22,19 +22,3 @@ pub mod sql_build { pub use crate::sql_update::SqlUpdate; pub use crate::sql_value::{FromQuoted, SqlValue, SqlValueOptional}; } - -#[cfg(not(target_arch = "wasm32"))] -use async_sql_conn::AsyncConnection; -#[cfg(not(target_arch = "wasm32"))] use rusqlite::Connection; - -#[cfg(not(target_arch = "wasm32"))] -pub struct AsyncConnectionCtx { - pub db_id: String, - pub connection: AsyncConnection, -} - -#[cfg(not(target_arch = "wasm32"))] -pub struct SyncConnectionCtx { - pub db_id: String, - pub connection: Connection, -} diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 996056f01a..fccb19dcad 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -26,11 +26,8 @@ cfg_wasm32! { } cfg_native! 
{ - use db_common::AsyncConnectionCtx; - use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; - use crate::sql_connection_pool::{SqliteConnPool, ASYNC_SQLITE_DB_ID}; - use futures::lock::Mutex as AsyncMutex; + use crate::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool}; use rustls::ServerName; use mm2_metrics::prometheus; use mm2_metrics::MmMetricsError; @@ -123,7 +120,7 @@ pub struct MmCtx { pub sqlite_conn_pool: Constructible, /// asynchronous handle for rusqlite connection. #[cfg(not(target_arch = "wasm32"))] - pub async_sqlite_connection: Constructible>>, + pub async_sqlite_conn_pool: Constructible, pub mm_version: String, pub datetime: String, pub mm_init_ctx: Mutex>>, @@ -176,7 +173,7 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] sqlite_conn_pool: Constructible::default(), #[cfg(not(target_arch = "wasm32"))] - async_sqlite_connection: Constructible::default(), + async_sqlite_conn_pool: Constructible::default(), mm_version: "".into(), datetime: "".into(), mm_init_ctx: Mutex::new(None), @@ -349,23 +346,6 @@ impl MmCtx { pub fn mm_version(&self) -> &str { &self.mm_version } - #[cfg(not(target_arch = "wasm32"))] - pub async fn init_async_sqlite_connection(&self, db_id: Option<&str>) -> Result<(), String> { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| self.rmd160_hex()); - let sqlite_file_path = self.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); - log_sqlite_file_open_attempt(&sqlite_file_path); - let async_conn = try_s!(AsyncConnection::open(sqlite_file_path).await); - - try_s!(self - .async_sqlite_connection - .pin(Arc::new(AsyncMutex::new(AsyncConnectionCtx { - connection: async_conn, - db_id, - })))); - - Ok(()) - } - /// Retrieves an optional shared connection from the pool for the specified database ID. /// Returns `None` if the connection pool is not initialized. #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 9a2a8fcbbf..dbe80dd2eb 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -1,5 +1,8 @@ use crate::mm_ctx::{log_sqlite_file_open_attempt, MmCtx}; +use common::log::error; +use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; +use futures::lock::Mutex as AsyncMutex; use gstuff::try_s; use std::collections::HashMap; use std::sync::{Arc, Mutex}; @@ -159,3 +162,115 @@ impl SqliteConnPool { )) } } + +/// A pool for managing async SQLite connections, where each connection is keyed by a unique string identifier. +#[derive(Clone)] +pub struct AsyncSqliteConnPool(Arc>>>>); + +impl AsyncSqliteConnPool { + /// Initialize a database connection. + pub async fn init(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); + + match ctx.async_sqlite_conn_pool.as_option() { + // if connection pool is not already initialized, create new connection pool. + None => { + let conn = Self::open_connection(ctx, &db_id).await; + let store = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); + try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); + }, + // if connection pool is already initialized, insert new connection. + Some(pool) => { + let conn = Self::open_connection(ctx, &db_id).await; + let mut pool = pool.0.lock().await; + pool.insert(db_id, conn); + }, + }; + + Ok(()) + } + + /// Initialize a database connection. 
+ pub async fn init_test(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); + + match ctx.async_sqlite_conn_pool.as_option() { + // if connection pool is not already initialized, create new connection pool. + None => { + let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); + let store = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); + try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); + }, + // if connection pool is already initialized, insert new connection. + Some(pool) => { + let mut pool = pool.0.lock().await; + let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); + pool.insert(db_id, conn); + }, + }; + + Ok(()) + } + + /// Retrieve or create a connection optionally. + pub async fn async_sqlite_conn_opt( + &self, + ctx: &MmCtx, + db_id: Option<&str>, + ) -> Option>> { + if let Some(connections) = ctx.async_sqlite_conn_pool.as_option() { + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); + let mut connections = connections.0.lock().await; + return if let Some(connection) = connections.get(&db_id) { + Some(connection.clone()) + } else { + let conn = Self::open_connection(ctx, &db_id).await; + connections.insert(db_id, conn.clone()); + // TODO: run migration and fix directions + Some(conn) + }; + }; + None + } + + /// Retrieve or create a connection. + pub async fn async_sqlite_conn(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { + let mut connections = ctx + .async_sqlite_conn_pool + .or(&|| panic!("async_sqlite_conn_pool is not initialized")) + .0 + .lock() + .await; + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); + + if let Some(connection) = connections.get(&db_id) { + connection.clone() + } else { + let conn = Self::open_connection(ctx, &db_id).await; + connections.insert(db_id, conn.clone()); + // TODO: run migration and fix directions + conn + } + } + + pub async fn close_connections(&self) { + let mut connections = self.0.lock().await; + for (id, connection) in connections.iter_mut() { + let mut connection = connection.lock().await; + if let Err(e) = connection.close().await { + error!("Error stopping AsyncConnection: {}, connection_id=({id})", e); + } + } + } + + async fn open_connection(ctx: &MmCtx, db_id: &str) -> Arc> { + let sqlite_file_path = ctx.dbdir(Some(db_id)).join(ASYNC_SQLITE_DB_ID); + log_sqlite_file_open_attempt(&sqlite_file_path); + + Arc::new(AsyncMutex::new( + AsyncConnection::open(sqlite_file_path) + .await + .expect("failed to open db"), + )) + } +} diff --git a/mm2src/mm2_gui_storage/src/context.rs b/mm2src/mm2_gui_storage/src/context.rs index fb6203e3c3..66914f7e09 100644 --- a/mm2src/mm2_gui_storage/src/context.rs +++ b/mm2src/mm2_gui_storage/src/context.rs @@ -14,7 +14,9 @@ impl AccountContext { pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { from_ctx(&ctx.account_ctx, move || { Ok(AccountContext { - storage: AccountStorageBuilder::new(ctx, db_id).build().map_err(|e| e.to_string())?, + storage: AccountStorageBuilder::new(ctx, db_id) + .build() + .map_err(|e| e.to_string())?, db_id: db_id.map(|e| e.to_string()), }) }) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index e6dae2f665..a366fa94b0 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -18,6 +18,15 @@ // marketmaker // +use 
crate::mm2::heartbeat_event::HeartbeatEvent; +use crate::mm2::lp_message_service::{init_message_service, InitMessageServiceError}; +use crate::mm2::lp_network::{lp_network_ports, p2p_event_process_loop, NetIdError}; +use crate::mm2::lp_ordermatch::{broadcast_maker_orders_keep_alive_loop, clean_memory_loop, init_ordermatch_context, + lp_ordermatch_loop, orders_kick_start, BalanceUpdateOrdermatchHandler, + OrdermatchInitError}; +use crate::mm2::lp_swap::{running_swaps_num, swap_kick_starts}; +use crate::mm2::lp_wallet::{initialize_wallet_passphrase, WalletInitError}; +use crate::mm2::rpc::spawn_rpc; use bitcrypto::sha256; use coins::register_balance_update_handler; use common::executor::{SpawnFuture, Timer}; @@ -44,24 +53,14 @@ use std::str; use std::time::Duration; use std::{fs, usize}; -#[cfg(not(target_arch = "wasm32"))] -use crate::mm2::database::init_and_migrate_sql_db; -use crate::mm2::heartbeat_event::HeartbeatEvent; -use crate::mm2::lp_message_service::{init_message_service, InitMessageServiceError}; -use crate::mm2::lp_network::{lp_network_ports, p2p_event_process_loop, NetIdError}; -use crate::mm2::lp_ordermatch::{broadcast_maker_orders_keep_alive_loop, clean_memory_loop, init_ordermatch_context, - lp_ordermatch_loop, orders_kick_start, BalanceUpdateOrdermatchHandler, - OrdermatchInitError}; -use crate::mm2::lp_swap::{running_swaps_num, swap_kick_starts}; -use crate::mm2::lp_wallet::{initialize_wallet_passphrase, WalletInitError}; -use crate::mm2::rpc::spawn_rpc; - cfg_native! { + use crate::mm2::database::init_and_migrate_sql_db; + use db_common::sqlite::rusqlite::Error as SqlError; + use mm2_core::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool}; use mm2_io::fs::{ensure_dir_is_writable, ensure_file_is_writable}; use mm2_net::ip_addr::myipaddr; use rustls_pemfile as pemfile; - use mm2_core::sql_connection_pool::SqliteConnPool; } #[path = "lp_init/init_context.rs"] mod init_context; @@ -465,11 +464,11 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { fix_directories(&ctx, None, None)?; - SqliteConnPool::init(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - SqliteConnPool::init_shared(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - ctx.init_async_sqlite_connection(None) + AsyncSqliteConnPool::init(&ctx, None) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + SqliteConnPool::init(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + SqliteConnPool::init_shared(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; init_and_migrate_sql_db(&ctx, None).await?; migrate_db(&ctx, None)?; } diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs index 34653f3088..f18ed7f97b 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs @@ -33,6 +33,7 @@ use mm2_rpc::data::legacy::{BalanceResponse, CoinInitResponse, Mm2RpcResult, MmV use serde_json::{self as json, Value as Json}; use std::borrow::Cow; use std::collections::HashSet; +use std::fmt::Debug; use std::iter::Extend; use uuid::Uuid; @@ -248,11 +249,8 @@ pub async fn my_balance(ctx: MmArc, req: Json) -> Result>, Stri #[cfg(not(target_arch = "wasm32"))] async fn close_async_connection(ctx: &MmArc) { - if let Some(connections) = ctx.async_sqlite_connection.as_option() { - let mut conn = connections.lock().await; - if let Err(e) = conn.connection.close().await { - 
error!("Error stopping AsyncConnection: {}", e); - } + if let Some(connections) = ctx.async_sqlite_conn_pool.as_option() { + connections.close_connections().await; } } diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index d8d191149d..d606bbf61f 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -41,7 +41,7 @@ cfg_native! { use futures::task::SpawnExt; use http::Request; use regex::Regex; - use mm2_core::sql_connection_pool::SqliteConnPool; + use mm2_core::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool}; use std::fs; use std::io::Write; use std::net::Ipv4Addr; @@ -1098,18 +1098,8 @@ pub fn mm_ctx_with_custom_db_with_conf(conf: Option) -> MmArc { #[cfg(not(target_arch = "wasm32"))] pub async fn mm_ctx_with_custom_async_db() -> MmArc { - use db_common::async_sql_conn::AsyncConnection; - use futures::lock::Mutex as AsyncMutex; - use std::sync::Arc; - let ctx = MmCtxBuilder::new().into_mm_arc(); - - let connection = AsyncConnection::open_in_memory().await.unwrap(); - let connection = db_common::AsyncConnectionCtx { - connection, - db_id: ctx.rmd160_hex(), - }; - let _ = ctx.async_sqlite_connection.pin(Arc::new(AsyncMutex::new(connection))); + AsyncSqliteConnPool::init_test(&ctx, None).await.unwrap(); ctx } From 0686d0e47066138439ee5bcf624188b7a3da1cad Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 6 May 2024 19:57:28 +0100 Subject: [PATCH 098/186] code organization and improvements --- mm2src/mm2_core/src/mm_ctx.rs | 4 +- mm2src/mm2_core/src/sql_connection_pool.rs | 46 ++++------------------ 2 files changed, 9 insertions(+), 41 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index fccb19dcad..4613027787 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -351,7 +351,7 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub fn shared_sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { - return pool.shared_sqlite_conn_opt(self, db_id); + return Some(pool.sqlite_conn_shared(self, db_id)); }; None @@ -362,7 +362,7 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { - return pool.sqlite_conn_opt(self, db_id); + return Some(pool.sqlite_conn(self, db_id)); }; None diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index dbe80dd2eb..9cfdb4dd8d 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -80,55 +80,23 @@ impl SqliteConnPool { /// Retrieves a single-user connection from the pool. pub fn sqlite_conn(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { - Self::sqlite_conn_impl(ctx, db_id, DbIdConnKind::Single) + self.sqlite_conn_impl(ctx, db_id, DbIdConnKind::Single) } /// Retrieves a shared connection from the pool. pub fn sqlite_conn_shared(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { - Self::sqlite_conn_impl(ctx, db_id, DbIdConnKind::Shared) + self.sqlite_conn_impl(ctx, db_id, DbIdConnKind::Shared) } - /// Optionally retrieves a single-user connection from the pool if available. - pub fn sqlite_conn_opt(&self, ctx: &MmCtx, db_id: Option<&str>) -> Option>> { - Self::sqlite_conn_opt_impl(ctx, db_id, DbIdConnKind::Single) - } - - /// Optionally retrieves a shared connection from the pool if available. 
- pub fn shared_sqlite_conn_opt(&self, ctx: &MmCtx, db_id: Option<&str>) -> Option>> { - Self::sqlite_conn_opt_impl(ctx, db_id, DbIdConnKind::Shared) - } - - /// Internal implementation to retrieve or create a connection optionally. - fn sqlite_conn_opt_impl( + /// Internal implementation to retrieve or create a connection. + fn sqlite_conn_impl( + &self, ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind, - ) -> Option>> { - if let Some(connections) = ctx.sqlite_conn_pool.as_option() { - let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); - let mut connections = connections.0.lock().unwrap(); - return if let Some(connection) = connections.get(&db_id) { - Some(connection.clone()) - } else { - let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); - connections.insert(db_id, conn.clone()); - // TODO: run migration and fix directions - Some(conn) - }; - }; - None - } - - /// Internal implementation to retrieve or create a connection. - fn sqlite_conn_impl(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Arc> { - let mut connections = ctx - .sqlite_conn_pool - .or(&|| panic!("sqlite_conn_pool is not initialized")) - .0 - .lock() - .unwrap(); - + ) -> Arc> { let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); + let mut connections = self.0.lock().unwrap(); return if let Some(connection) = connections.get(&db_id) { connection.clone() } else { From 719862ba7f1e5e8885a8c472c9392a1792e985b3 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 7 May 2024 17:46:58 +0100 Subject: [PATCH 099/186] minor changes --- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 7 ++----- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 8 ++------ 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index c2ff0f4da9..898edf51f4 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -2167,15 +2167,12 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { command = c; }, None => { - if let Err(e) = - mark_swap_as_finished(ctx.clone(), running_swap.uuid, running_swap.db_id().as_deref()).await - { + if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid, db_id.as_deref()).await { error!("!mark_swap_finished({}): {}", uuid, e); } if to_broadcast { - if let Err(e) = broadcast_my_swap_status(&ctx, uuid, running_swap.db_id().as_deref()).await - { + if let Err(e) = broadcast_my_swap_status(&ctx, uuid, db_id.as_deref()).await { error!("!broadcast_my_swap_status({}): {}", uuid, e); } } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 48f8730521..d3103e105b 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -504,16 +504,12 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { command = c; }, None => { - if let Err(e) = - mark_swap_as_finished(ctx.clone(), running_swap.uuid, running_swap.db_id().as_deref()).await - { + if let Err(e) = mark_swap_as_finished(ctx.clone(), running_swap.uuid, db_id.as_deref()).await { error!("!mark_swap_finished({}): {}", uuid, e); } if to_broadcast { - if let Err(e) = - broadcast_my_swap_status(&ctx, running_swap.uuid, running_swap.db_id().as_deref()).await - { + if let Err(e) = broadcast_my_swap_status(&ctx, running_swap.uuid, db_id.as_deref()).await { error!("!broadcast_my_swap_status({}): {}", uuid, e); } } From 6d6169f46f39bf70ea82f42759e600f31cf330ba Mon Sep 
17 00:00:00 2001 From: Samuel Onoja Date: Tue, 7 May 2024 19:06:48 +0100 Subject: [PATCH 100/186] fix hd_wallet todos --- mm2src/coins/eth/v2_activation.rs | 14 +++++------ mm2src/coins/hd_wallet/storage/mod.rs | 23 ++++++++++--------- .../utxo/utxo_builder/utxo_coin_builder.rs | 6 ++--- 3 files changed, 20 insertions(+), 23 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 4d99dfebf2..5a79e19f9e 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -649,10 +649,9 @@ pub(crate) async fn build_address_and_priv_key_policy( let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); let hd_wallet_rmd160 = *ctx.rmd160(); - let hd_wallet_storage = - HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160, None) - .await - .mm_err(EthActivationV2Error::from)?; + let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) + .await + .mm_err(EthActivationV2Error::from)?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, &path_to_coin).await?; let gap_limit = gap_limit.unwrap_or(DEFAULT_GAP_LIMIT); let hd_wallet = EthHDWallet { @@ -686,10 +685,9 @@ pub(crate) async fn build_address_and_priv_key_policy( .hw_ctx() .or_mm_err(|| EthActivationV2Error::HwContextNotInitialized)?; let hd_wallet_rmd160 = hw_ctx.rmd160(); - let hd_wallet_storage = - HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160, None) - .await - .mm_err(EthActivationV2Error::from)?; + let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) + .await + .mm_err(EthActivationV2Error::from)?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, &path_to_coin).await?; let gap_limit = gap_limit.unwrap_or(DEFAULT_GAP_LIMIT); let hd_wallet = EthHDWallet { diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index a38551e687..6afacf0aed 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -220,12 +220,13 @@ impl Default for HDWalletCoinStorage { } impl HDWalletCoinStorage { - pub async fn init(ctx: &MmArc, coin: String, db_id: Option<&str>) -> HDWalletStorageResult { - let inner = Box::new(HDWalletStorageInstance::init(ctx, db_id).await?); + pub async fn init(ctx: &MmArc, coin: String) -> HDWalletStorageResult { let crypto_ctx = CryptoCtx::from_ctx(ctx)?; let hd_wallet_rmd160 = crypto_ctx .hw_wallet_rmd160() .or_mm_err(|| HDWalletStorageError::HDWalletUnavailable)?; + let db_id = hex::encode(hd_wallet_rmd160.as_slice()); + let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -237,9 +238,9 @@ impl HDWalletCoinStorage { ctx: &MmArc, coin: String, hd_wallet_rmd160: H160, - db_id: Option<&str>, ) -> HDWalletStorageResult { - let inner = Box::new(HDWalletStorageInstance::init(ctx, db_id).await?); + let db_id = hex::encode(hd_wallet_rmd160.as_slice()); + let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -343,13 +344,13 @@ mod tests { let device0_rmd160 = H160::from("0000000000000000000000000000000000000020"); let device1_rmd160 = H160::from("0000000000000000000000000000000000000030"); - let rick_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160, None) + let rick_device0_db = 
HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); - let rick_device1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160, None) + let rick_device1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160) .await .expect("!HDWalletCoinStorage::new"); - let morty_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "MORTY".to_owned(), device0_rmd160, None) + let morty_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "MORTY".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); @@ -435,13 +436,13 @@ mod tests { let device1_rmd160 = H160::from("0000000000000000000000000000000000000020"); let device2_rmd160 = H160::from("0000000000000000000000000000000000000030"); - let wallet0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160, None) + let wallet0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); - let wallet1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160, None) + let wallet1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160) .await .expect("!HDWalletCoinStorage::new"); - let wallet2_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device2_rmd160, None) + let wallet2_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device2_rmd160) .await .expect("!HDWalletCoinStorage::new"); @@ -494,7 +495,7 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device_rmd160 = H160::from("0000000000000000000000000000000000000010"); - let db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device_rmd160, None) + let db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device_rmd160) .await .expect("!HDWalletCoinStorage::new"); diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index ae8fde91d9..1fc1e8a8e7 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -223,7 +223,7 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { let hd_wallet_rmd160 = *self.ctx().rmd160(); // TODO shared_db_id let hd_wallet_storage = - HDWalletCoinStorage::init_with_rmd160(self.ctx(), self.ticker().to_owned(), hd_wallet_rmd160, None).await?; + HDWalletCoinStorage::init_with_rmd160(self.ctx(), self.ticker().to_owned(), hd_wallet_rmd160).await?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, path_to_coin) .await .mm_err(UtxoCoinBuildError::from)?; @@ -346,7 +346,6 @@ pub trait UtxoFieldsWithHardwareWalletBuilder: UtxoCoinBuilderCommonOps { return MmError::err(UtxoCoinBuildError::CoinDoesntSupportTrezor); } let hd_wallet_rmd160 = self.trezor_wallet_rmd160()?; - // For now, use a default script pubkey. 
// TODO change the type of `recently_spent_outpoints` to `AsyncMutex>` let my_script_pubkey = Bytes::new(); @@ -358,8 +357,7 @@ pub trait UtxoFieldsWithHardwareWalletBuilder: UtxoCoinBuilderCommonOps { .clone() .or_mm_err(|| UtxoConfError::DerivationPathIsNotSet)?; - // TODO shared_d_id - let hd_wallet_storage = HDWalletCoinStorage::init(self.ctx(), ticker, None).await?; + let hd_wallet_storage = HDWalletCoinStorage::init(self.ctx(), ticker).await?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, &path_to_coin) .await From 6954d62acc6cb993398b321ac68dfabcaac480d5 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 8 May 2024 21:36:31 +0100 Subject: [PATCH 101/186] add more todos --- mm2src/coins/lp_coins.rs | 15 ++++++++++----- .../mm2_main/src/rpc/lp_commands/lp_commands.rs | 3 +++ 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 34950dc38a..1518196c04 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -263,6 +263,7 @@ use hd_wallet::{AccountUpdatingError, AddressDerivingError, HDAccountOps, HDAddr HDCoinHDAccount, HDExtractPubkeyError, HDPathAccountToAddressId, HDWalletAddress, HDWalletCoinOps, HDWalletOps, HDWithdrawError, HDXPubExtractor, WithdrawFrom, WithdrawSenderAddress}; use nft::nft_errors::GetNftInfoError; +use primitives::hash::H160; use qrc20::{qrc20_coin_with_policy, Qrc20ActivationParams, Qrc20Coin, Qrc20FeeDetails}; use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskManagerShared}, init_account_balance::{AccountBalanceTaskManager, AccountBalanceTaskManagerShared}, @@ -3071,6 +3072,8 @@ pub trait MmCoin: fn account_db_id(&self) -> Option { None } + fn account_shared_db_id(&self) -> Option { None } + /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] fn tx_history_path(&self, ctx: &MmArc) -> PathBuf { @@ -4427,22 +4430,24 @@ pub async fn lp_coinfind_any(ctx: &MmArc, ticker: &str) -> Result Result, String> { find_unique_account_ids(ctx, false).await } +/// Finds unique account IDs for active accounts. pub async fn find_unique_account_ids_active(ctx: &MmArc) -> Result, String> { find_unique_account_ids(ctx, true).await } +/// Finds unique account IDs based on the given context and active status. async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result, String> { // Using a HashSet to ensure uniqueness efficiently - let mut account_ids = HashSet::new(); - // Add default wallet pubkey as coin.account_db_id() will return None by default - account_ids.insert(ctx.rmd160_hex()); + // Initialize with default wallet pubkey as coin.account_db_id() will return None by default. 
+ let mut account_ids = HashSet::from([ctx.rmd160_hex()]); - let cctx = try_s!(CoinsContext::from_ctx(ctx)); - let coins = cctx.coins.lock().await; + let coin_ctx = try_s!(CoinsContext::from_ctx(ctx)); + let coins = coin_ctx.coins.lock().await; let coins = coins.values().collect::>(); for coin in coins.iter() { diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs index ae992c6d3e..2bbb91ab4a 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs @@ -34,6 +34,7 @@ impl HttpStatusCode for GetPublicKeyError { } } +// TODO: Return public_key for all available and active unique pubkeys pub async fn get_public_key(ctx: MmArc, _req: Json) -> GetPublicKeyRpcResult { let public_key = CryptoCtx::from_ctx(&ctx)?.mm2_internal_pubkey().to_string(); Ok(GetPublicKeyResponse { public_key }) @@ -44,6 +45,7 @@ pub struct GetPublicKeyHashResponse { public_key_hash: H160Json, } +// TODO: Return public_key_hash for all available and active unique pubkeys pub async fn get_public_key_hash(ctx: MmArc, _req: Json) -> GetPublicKeyRpcResult { let public_key_hash = ctx.rmd160().to_owned().into(); Ok(GetPublicKeyHashResponse { public_key_hash }) @@ -54,6 +56,7 @@ pub struct GetSharedDbIdResponse { shared_db_id: H160Json, } +// TODO: Return shared_db_id for all available and active unique pubkeys pub async fn get_shared_db_id(ctx: MmArc, _req: Json) -> GetSharedDbIdResult { let shared_db_id = ctx.shared_db_id().to_owned().into(); Ok(GetSharedDbIdResponse { shared_db_id }) From 79e7c892e30ec0f139e258250a2df987b2f50e95 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 9 May 2024 20:48:25 +0100 Subject: [PATCH 102/186] add nft unit test for multikeydb --- mm2src/coins/Cargo.toml | 24 ++++---- mm2src/coins/nft/nft_tests.rs | 31 ++++++++++ mm2src/common/common.rs | 6 +- mm2src/mm2_core/src/sql_connection_pool.rs | 21 ++++--- mm2src/mm2_main/src/lp_swap.rs | 15 ++++- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 6 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 6 +- mm2src/mm2_test_helpers/src/for_tests.rs | 71 +++++++++++----------- mm2src/mm2_test_helpers/src/lib.rs | 3 +- 9 files changed, 112 insertions(+), 71 deletions(-) diff --git a/mm2src/coins/Cargo.toml b/mm2src/coins/Cargo.toml index 55dc1903e5..c7223137de 100644 --- a/mm2src/coins/Cargo.toml +++ b/mm2src/coins/Cargo.toml @@ -7,17 +7,17 @@ edition = "2018" zhtlc-native-tests = [] # TODO enable-solana = [ - "dep:bincode", - "dep:ed25519-dalek-bip32", - "dep:solana-client", - "dep:solana-sdk", - "dep:solana-transaction-status", - "dep:spl-token", - "dep:spl-associated-token-account" + "dep:bincode", + "dep:ed25519-dalek-bip32", + "dep:solana-client", + "dep:solana-sdk", + "dep:solana-transaction-status", + "dep:spl-token", + "dep:spl-associated-token-account" ] enable-sia = [ - "dep:reqwest", - "blake2b_simd" + "dep:reqwest", + "blake2b_simd" ] default = [] run-docker-tests = [] @@ -76,7 +76,7 @@ mm2_git = { path = "../mm2_git" } mm2_io = { path = "../mm2_io" } mm2_metrics = { path = "../mm2_metrics" } mm2_net = { path = "../mm2_net" } -mm2_number = { path = "../mm2_number"} +mm2_number = { path = "../mm2_number" } mm2_rpc = { path = "../mm2_rpc" } mm2_state_machine = { path = "../mm2_state_machine" } mocktopus = "0.8.0" @@ -107,7 +107,7 @@ sha3 = "0.9" utxo_signer = { path = "utxo_signer" } # using the same version as cosmrs tendermint-rpc = { version = "0.32.0", default-features = false } -tokio-tungstenite-wasm = { git = 
"https://github.com/KomodoPlatform/tokio-tungstenite-wasm", rev = "d20abdb", features = ["rustls-tls-native-roots"]} +tokio-tungstenite-wasm = { git = "https://github.com/KomodoPlatform/tokio-tungstenite-wasm", rev = "d20abdb", features = ["rustls-tls-native-roots"] } url = { version = "2.2.2", features = ["serde"] } uuid = { version = "1.2.2", features = ["fast-rng", "serde", "v4"] } # One of web3 dependencies is the old `tokio-uds 0.1.7` which fails cross-compiling to ARM. @@ -116,7 +116,7 @@ web3 = { git = "https://github.com/KomodoPlatform/rust-web3", tag = "v0.19.0", d zbase32 = "0.1.2" zcash_client_backend = { git = "https://github.com/KomodoPlatform/librustzcash.git", tag = "k-1.4.1" } zcash_extras = { git = "https://github.com/KomodoPlatform/librustzcash.git", tag = "k-1.4.1" } -zcash_primitives = {features = ["transparent-inputs"], git = "https://github.com/KomodoPlatform/librustzcash.git", tag = "k-1.4.1" } +zcash_primitives = { features = ["transparent-inputs"], git = "https://github.com/KomodoPlatform/librustzcash.git", tag = "k-1.4.1" } [target.'cfg(all(not(target_os = "ios"), not(target_os = "android"), not(target_arch = "wasm32")))'.dependencies] bincode = { version = "1.3.3", default-features = false, optional = true } diff --git a/mm2src/coins/nft/nft_tests.rs b/mm2src/coins/nft/nft_tests.rs index d9943d61c4..0784e1fffd 100644 --- a/mm2src/coins/nft/nft_tests.rs +++ b/mm2src/coins/nft/nft_tests.rs @@ -187,6 +187,37 @@ cross_test!(test_last_nft_block, { assert_eq!(last_block, 28056726); }); +cross_test!(test_last_nft_block_multikey_db, { + let chain = Chain::Bsc; + let nft_ctx = get_nft_ctx(&chain).await; + let storage_1 = nft_ctx.lock_db(None).await.unwrap(); + let storage_2 = nft_ctx.lock_db(Some("TEST_DB_ID")).await.unwrap(); + NftListStorageOps::init(&storage_1, &chain).await.unwrap(); + NftListStorageOps::init(&storage_2, &chain).await.unwrap(); + + // insert nft into storage_1 and query for last block(should return 28056726 and assert_eq should pass!) + let nft_list = nft_list(); + storage_1.add_nfts_to_list(chain, nft_list, 28056726).await.unwrap(); + let last_block = NftListStorageOps::get_last_block_number(&storage_1, &chain) + .await + .unwrap() + .unwrap(); + assert_eq!(last_block, 28056726); + + // Since we didn't insert nft into storage_2 instance yet, last_block should return None and assert_eq should pass! + let last_block = NftListStorageOps::get_last_block_number(&storage_2, &chain) + .await + .unwrap(); + assert_eq!(last_block, None); + + // storage_1 last block should pass again + let last_block = NftListStorageOps::get_last_block_number(&storage_1, &chain) + .await + .unwrap() + .unwrap(); + assert_eq!(last_block, 28056726); +}); + cross_test!(test_nft_list, { let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; diff --git a/mm2src/common/common.rs b/mm2src/common/common.rs index 5e7efb7e37..95fed9ef5b 100644 --- a/mm2src/common/common.rs +++ b/mm2src/common/common.rs @@ -530,7 +530,7 @@ pub fn double_panic_crash() { drop(panicker) // Delays the drop. } -/// RPC response, returned by the RPC handlers. +/// RPC response, returned by the RPC handlers. /// NB: By default the future is executed on the shared asynchronous reactor (`CORE`), /// the handler is responsible for spawning the future on another reactor if it doesn't fit the `CORE` well. 
pub type HyRes = Box>, Error = String> + Send>; @@ -696,8 +696,8 @@ pub fn now_sec_i64() -> i64 { #[cfg(not(target_arch = "wasm32"))] pub fn temp_dir() -> PathBuf { env::temp_dir() } -/// If the `MM_LOG` variable is present then tries to open that file. -/// Prints a warning to `stdout` if there's a problem opening the file. +/// If the `MM_LOG` variable is present then tries to open that file. +/// Prints a warning to `stdout` if there's a problem opening the file. /// Returns `None` if `MM_LOG` variable is not present or if the specified path can't be opened. #[cfg(not(target_arch = "wasm32"))] pub(crate) fn open_log_file() -> Option { diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 9cfdb4dd8d..6bc41e0239 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -41,13 +41,13 @@ impl SqliteConnPool { let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); let store = Arc::new(Mutex::new(HashMap::from([(db_id, conn)]))); try_s!(ctx.sqlite_conn_pool.pin(Self(store))); - }, + } // if connection pool is already initialized, insert new connection. Some(pool) => { let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); let mut pool = pool.0.lock().unwrap(); pool.insert(db_id, conn); - }, + } }; Ok(()) @@ -68,12 +68,12 @@ impl SqliteConnPool { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let store = Arc::new(Mutex::new(HashMap::from([(db_id, connection)]))); try_s!(ctx.sqlite_conn_pool.pin(Self(store))); - }, + } Some(pool) => { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let mut pool = pool.0.lock().unwrap(); pool.insert(db_id, connection); - }, + } } Ok(()) } @@ -146,13 +146,13 @@ impl AsyncSqliteConnPool { let conn = Self::open_connection(ctx, &db_id).await; let store = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); - }, + } // if connection pool is already initialized, insert new connection. Some(pool) => { let conn = Self::open_connection(ctx, &db_id).await; let mut pool = pool.0.lock().await; pool.insert(db_id, conn); - }, + } }; Ok(()) @@ -166,15 +166,18 @@ impl AsyncSqliteConnPool { // if connection pool is not already initialized, create new connection pool. None => { let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); - let store = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); + // extra connection to test accessing different db test + let conn2 = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); + let connections = HashMap::from([(db_id, conn), ("TEST_DB_ID".to_owned(), conn2)]); + let store = Arc::new(AsyncMutex::new(connections)); try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); - }, + } // if connection pool is already initialized, insert new connection. 
Some(pool) => { let mut pool = pool.0.lock().await; let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); pool.insert(db_id, conn); - }, + } }; Ok(()) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index ec2867a87e..020f6fb1b2 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1359,21 +1359,30 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result warn!("No such swap with the uuid '{}'", uuid), - Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), + Err(e) => error!( + "Error loading a swap with the uuid '{}': {} for db_id=({db_id})", + uuid, e + ), }, MAKER_SWAP_V2_TYPE => match get_maker_swap_data_for_rpc(&ctx, uuid, Some(&db_result.pubkey)).await { Ok(data) => { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); }, - Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), + Err(e) => error!( + "Error loading a swap with the uuid '{}': {} for db_id=({db_id})", + uuid, e + ), }, TAKER_SWAP_V2_TYPE => match get_taker_swap_data_for_rpc(&ctx, uuid, Some(&db_result.pubkey)).await { Ok(data) => { let swap_json = try_s!(json::to_value(data)); swaps.push(swap_json); }, - Err(e) => error!("Error loading a swap with the uuid '{}': {}", uuid, e), + Err(e) => error!( + "Error loading a swap with the uuid '{}': {} for db_id=({db_id})", + uuid, e + ), }, unknown_type => error!("Swap with the uuid '{}' has unknown type {}", uuid, unknown_type), } diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 2e21b44b69..0fd3d407d4 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -1304,9 +1304,9 @@ impl MakerSwap { maker_coin: MmCoinEnum, taker_coin: MmCoinEnum, swap_uuid: &Uuid, + db_id: Option<&str>, ) -> Result<(Self, Option), String> { - let saved = match SavedSwap::load_my_swap_from_db(&ctx, maker_coin.account_db_id().as_deref(), *swap_uuid).await - { + let saved = match SavedSwap::load_my_swap_from_db(&ctx, db_id, *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), Err(e) => return ERR!("{}", e), @@ -2076,7 +2076,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { maker_coin, taker_coin, swap_uuid, - } => match MakerSwap::load_from_db_by_uuid(ctx, maker_coin, taker_coin, &swap_uuid).await { + } => match MakerSwap::load_from_db_by_uuid(ctx, maker_coin, taker_coin, &swap_uuid, db_id.as_deref()).await { Ok((swap, command)) => match command { Some(c) => { info!("Swap {} kick started.", uuid); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index a879c3bcc5..9acdc8a7f5 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -425,7 +425,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { maker_coin, taker_coin, swap_uuid, - } => match TakerSwap::load_from_db_by_uuid(ctx, maker_coin, taker_coin, &swap_uuid).await { + } => match TakerSwap::load_from_db_by_uuid(ctx, maker_coin, taker_coin, &swap_uuid, db_id.as_deref()).await { Ok((swap, command)) => match command { Some(c) => { info!("Swap {} kick started.", uuid); @@ -1979,9 +1979,9 @@ impl TakerSwap { maker_coin: MmCoinEnum, taker_coin: MmCoinEnum, swap_uuid: &Uuid, + db_id: Option<&str>, ) -> Result<(Self, Option), String> { - let account_key = taker_coin.account_db_id(); - let 
saved = match SavedSwap::load_my_swap_from_db(&ctx, account_key.as_deref(), *swap_uuid).await { + let saved = match SavedSwap::load_my_swap_from_db(&ctx, db_id, *swap_uuid).await { Ok(Some(saved)) => saved, Ok(None) => return ERR!("Couldn't find a swap with the uuid '{}'", swap_uuid), Err(e) => return ERR!("{}", e), diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index d606bbf61f..8ccf64a594 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -417,10 +417,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let bob_passphrase = crate::get_passphrase!(".env.seed", "BOB_PASSPHRASE").unwrap(); Mm2TestConf::seednode(&bob_passphrase, coins) - } + }, Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::seednode_with_hd_account(Self::BOB_HD_PASSPHRASE, coins) - } + }, } } @@ -429,10 +429,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let alice_passphrase = crate::get_passphrase!(".env.client", "ALICE_PASSPHRASE").unwrap(); Mm2TestConf::light_node(&alice_passphrase, coins, &[bob_ip]) - } + }, Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::light_node_with_hd_account(Self::ALICE_HD_PASSPHRASE, coins, &[bob_ip]) - } + }, } } } @@ -1096,7 +1096,6 @@ pub fn mm_ctx_with_custom_db_with_conf(conf: Option) -> MmArc { ctx } -#[cfg(not(target_arch = "wasm32"))] pub async fn mm_ctx_with_custom_async_db() -> MmArc { let ctx = MmCtxBuilder::new().into_mm_arc(); AsyncSqliteConnPool::init_test(&ctx, None).await.unwrap(); @@ -1121,7 +1120,7 @@ impl RaiiKill { _ => { self.running = false; false - } + }, } } } @@ -1266,7 +1265,7 @@ impl MarketMakerIt { let dir = folder.join("DB"); conf["dbdir"] = dir.to_str().unwrap().into(); dir - } + }, }; try_s!(fs::create_dir(&folder)); @@ -1281,7 +1280,7 @@ impl MarketMakerIt { let path = folder.join("mm2.log"); conf["log"] = path.to_str().unwrap().into(); path - } + }, }; // If `local` is provided @@ -1373,8 +1372,8 @@ impl MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1400,8 +1399,8 @@ impl MarketMakerIt { /// after process is stopped #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log_after_stop(&self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { use common::try_or_ready_err; @@ -1414,19 +1413,19 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_ms(ms) - .with_timeout_secs(timeout_sec) - .await - .map_err(|e| ERRL!("{:?}", e)) - // Convert `Result, String>` to `Result<(), String>` - .flatten() + .repeat_every_ms(ms) + .with_timeout_secs(timeout_sec) + .await + .map_err(|e| ERRL!("{:?}", e)) + // Convert `Result, String>` to `Result<(), String>` + .flatten() } /// Busy-wait on the instance in-memory log until the `pred` returns `true` or `timeout_sec` expires. 
#[cfg(target_arch = "wasm32")] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { wait_for_log(&self.ctx, timeout_sec, pred).await } @@ -1450,7 +1449,7 @@ impl MarketMakerIt { let body_str = json::to_string(&body).unwrap_or_else(|_| panic!("Response {:?} is not a valid JSON", body)); Ok((status_code, body_str, HeaderMap::new())) - } + }, Err(e) => Ok((StatusCode::INTERNAL_SERVER_ERROR, e, HeaderMap::new())), } } @@ -1516,7 +1515,7 @@ impl MarketMakerIt { } else { return ERR!("{}", err); } - } + }, }; if status != StatusCode::OK { return ERR!("MM didn't accept a stop. body: {}", body); @@ -1539,10 +1538,10 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_secs(0.05) - .with_timeout_ms(timeout_ms) - .await - .map_err(|e| ERRL!("{:?}", e)) + .repeat_every_secs(0.05) + .with_timeout_ms(timeout_ms) + .await + .map_err(|e| ERRL!("{:?}", e)) } /// Currently, we cannot wait for the `Completed IAmrelay handling for peer` log entry on WASM node, @@ -1647,8 +1646,8 @@ impl Drop for MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. pub async fn wait_for_log(ctx: &MmArc, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, +where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1748,7 +1747,7 @@ pub fn mm_spat() -> (&'static str, MarketMakerIt, RaiiDump, RaiiDump) { "pass".into(), None, ) - .unwrap(); + .unwrap(); let (dump_log, dump_dashboard) = mm_dump(&mm.log_path); (passphrase, mm, dump_log, dump_dashboard) } @@ -1825,10 +1824,10 @@ pub fn from_env_file(env: Vec) -> (Option, Option) { match cap.get(1) { Some(name) if name.as_bytes() == b"PASSPHRASE" => { passphrase = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - } + }, Some(name) if name.as_bytes() == b"USERPASS" => { userpass = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - } + }, _ => (), } } @@ -2181,7 +2180,7 @@ pub async fn init_lightning_status(mm: &MarketMakerIt, task_id: u64) -> Json { pub fn new_mm2_temp_folder_path(ip: Option) -> PathBuf { let now = common::now_ms(); #[allow(deprecated)] - let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); + let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); let folder = match ip { Some(ip) => format!("mm2_{}_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f"), ip), None => format!("mm2_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f")), @@ -3285,7 +3284,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; log!("Checking maker status.."); check_my_swap_status( @@ -3294,7 +3293,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; } } @@ -3319,8 +3318,8 @@ pub async fn test_qrc20_history_impl(local_start: Option) { "pass".into(), local_start, ) - .await - .unwrap(); + .await + .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/mm2_test_helpers/src/lib.rs b/mm2src/mm2_test_helpers/src/lib.rs index 2a09433eef..92da032370 100644 --- a/mm2src/mm2_test_helpers/src/lib.rs +++ b/mm2src/mm2_test_helpers/src/lib.rs @@ -1,7 +1,6 @@ #![feature(result_flattening)] -#[macro_use] -extern crate 
serde_derive; +#[macro_use] extern crate serde_derive; pub mod electrums; pub mod for_tests; From 20a44861d83427941e1522e4d76645d31d0b430e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 9 May 2024 21:04:41 +0100 Subject: [PATCH 103/186] make mm_ctx_with_custom_async_db non wasm32 target --- mm2src/mm2_core/src/sql_connection_pool.rs | 16 ++--- mm2src/mm2_test_helpers/src/for_tests.rs | 71 +++++++++++----------- 2 files changed, 44 insertions(+), 43 deletions(-) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 6bc41e0239..402440054a 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -41,13 +41,13 @@ impl SqliteConnPool { let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); let store = Arc::new(Mutex::new(HashMap::from([(db_id, conn)]))); try_s!(ctx.sqlite_conn_pool.pin(Self(store))); - } + }, // if connection pool is already initialized, insert new connection. Some(pool) => { let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); let mut pool = pool.0.lock().unwrap(); pool.insert(db_id, conn); - } + }, }; Ok(()) @@ -68,12 +68,12 @@ impl SqliteConnPool { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let store = Arc::new(Mutex::new(HashMap::from([(db_id, connection)]))); try_s!(ctx.sqlite_conn_pool.pin(Self(store))); - } + }, Some(pool) => { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let mut pool = pool.0.lock().unwrap(); pool.insert(db_id, connection); - } + }, } Ok(()) } @@ -146,13 +146,13 @@ impl AsyncSqliteConnPool { let conn = Self::open_connection(ctx, &db_id).await; let store = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); - } + }, // if connection pool is already initialized, insert new connection. Some(pool) => { let conn = Self::open_connection(ctx, &db_id).await; let mut pool = pool.0.lock().await; pool.insert(db_id, conn); - } + }, }; Ok(()) @@ -171,13 +171,13 @@ impl AsyncSqliteConnPool { let connections = HashMap::from([(db_id, conn), ("TEST_DB_ID".to_owned(), conn2)]); let store = Arc::new(AsyncMutex::new(connections)); try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); - } + }, // if connection pool is already initialized, insert new connection. 
Some(pool) => { let mut pool = pool.0.lock().await; let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); pool.insert(db_id, conn); - } + }, }; Ok(()) diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 8ccf64a594..d606bbf61f 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -417,10 +417,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let bob_passphrase = crate::get_passphrase!(".env.seed", "BOB_PASSPHRASE").unwrap(); Mm2TestConf::seednode(&bob_passphrase, coins) - }, + } Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::seednode_with_hd_account(Self::BOB_HD_PASSPHRASE, coins) - }, + } } } @@ -429,10 +429,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let alice_passphrase = crate::get_passphrase!(".env.client", "ALICE_PASSPHRASE").unwrap(); Mm2TestConf::light_node(&alice_passphrase, coins, &[bob_ip]) - }, + } Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::light_node_with_hd_account(Self::ALICE_HD_PASSPHRASE, coins, &[bob_ip]) - }, + } } } } @@ -1096,6 +1096,7 @@ pub fn mm_ctx_with_custom_db_with_conf(conf: Option) -> MmArc { ctx } +#[cfg(not(target_arch = "wasm32"))] pub async fn mm_ctx_with_custom_async_db() -> MmArc { let ctx = MmCtxBuilder::new().into_mm_arc(); AsyncSqliteConnPool::init_test(&ctx, None).await.unwrap(); @@ -1120,7 +1121,7 @@ impl RaiiKill { _ => { self.running = false; false - }, + } } } } @@ -1265,7 +1266,7 @@ impl MarketMakerIt { let dir = folder.join("DB"); conf["dbdir"] = dir.to_str().unwrap().into(); dir - }, + } }; try_s!(fs::create_dir(&folder)); @@ -1280,7 +1281,7 @@ impl MarketMakerIt { let path = folder.join("mm2.log"); conf["log"] = path.to_str().unwrap().into(); path - }, + } }; // If `local` is provided @@ -1372,8 +1373,8 @@ impl MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1399,8 +1400,8 @@ impl MarketMakerIt { /// after process is stopped #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log_after_stop(&self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { use common::try_or_ready_err; @@ -1413,19 +1414,19 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_ms(ms) - .with_timeout_secs(timeout_sec) - .await - .map_err(|e| ERRL!("{:?}", e)) - // Convert `Result, String>` to `Result<(), String>` - .flatten() + .repeat_every_ms(ms) + .with_timeout_secs(timeout_sec) + .await + .map_err(|e| ERRL!("{:?}", e)) + // Convert `Result, String>` to `Result<(), String>` + .flatten() } /// Busy-wait on the instance in-memory log until the `pred` returns `true` or `timeout_sec` expires. 
#[cfg(target_arch = "wasm32")] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { wait_for_log(&self.ctx, timeout_sec, pred).await } @@ -1449,7 +1450,7 @@ impl MarketMakerIt { let body_str = json::to_string(&body).unwrap_or_else(|_| panic!("Response {:?} is not a valid JSON", body)); Ok((status_code, body_str, HeaderMap::new())) - }, + } Err(e) => Ok((StatusCode::INTERNAL_SERVER_ERROR, e, HeaderMap::new())), } } @@ -1515,7 +1516,7 @@ impl MarketMakerIt { } else { return ERR!("{}", err); } - }, + } }; if status != StatusCode::OK { return ERR!("MM didn't accept a stop. body: {}", body); @@ -1538,10 +1539,10 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_secs(0.05) - .with_timeout_ms(timeout_ms) - .await - .map_err(|e| ERRL!("{:?}", e)) + .repeat_every_secs(0.05) + .with_timeout_ms(timeout_ms) + .await + .map_err(|e| ERRL!("{:?}", e)) } /// Currently, we cannot wait for the `Completed IAmrelay handling for peer` log entry on WASM node, @@ -1646,8 +1647,8 @@ impl Drop for MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. pub async fn wait_for_log(ctx: &MmArc, timeout_sec: f64, pred: F) -> Result<(), String> -where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1747,7 +1748,7 @@ pub fn mm_spat() -> (&'static str, MarketMakerIt, RaiiDump, RaiiDump) { "pass".into(), None, ) - .unwrap(); + .unwrap(); let (dump_log, dump_dashboard) = mm_dump(&mm.log_path); (passphrase, mm, dump_log, dump_dashboard) } @@ -1824,10 +1825,10 @@ pub fn from_env_file(env: Vec) -> (Option, Option) { match cap.get(1) { Some(name) if name.as_bytes() == b"PASSPHRASE" => { passphrase = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - }, + } Some(name) if name.as_bytes() == b"USERPASS" => { userpass = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - }, + } _ => (), } } @@ -2180,7 +2181,7 @@ pub async fn init_lightning_status(mm: &MarketMakerIt, task_id: u64) -> Json { pub fn new_mm2_temp_folder_path(ip: Option) -> PathBuf { let now = common::now_ms(); #[allow(deprecated)] - let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); + let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); let folder = match ip { Some(ip) => format!("mm2_{}_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f"), ip), None => format!("mm2_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f")), @@ -3284,7 +3285,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; log!("Checking maker status.."); check_my_swap_status( @@ -3293,7 +3294,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; } } @@ -3318,8 +3319,8 @@ pub async fn test_qrc20_history_impl(local_start: Option) { "pass".into(), local_start, ) - .await - .unwrap(); + .await + .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); #[cfg(not(target_arch = "wasm32"))] From 7a2ebafbd014a239bf1944a3fc8343b55373eb46 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 10 May 2024 20:12:24 +0100 Subject: [PATCH 104/186] refactor db_lock and use tokio sync for handling lock --- Cargo.lock | 1 + .../coins/hd_wallet/storage/wasm_storage.rs | 4 +- 
mm2src/coins/lp_coins.rs | 4 +- mm2src/coins/nft/nft_tests.rs | 4 +- mm2src/coins/nft/storage/wasm/nft_idb.rs | 2 +- mm2src/coins/nft/storage/wasm/wasm_storage.rs | 4 +- .../tx_history_storage/wasm/tx_history_db.rs | 2 +- .../wasm/tx_history_storage_v2.rs | 2 +- .../wasm/indexeddb_block_header_storage.rs | 4 +- .../storage/blockdb/blockdb_idb_storage.rs | 4 +- .../z_coin/storage/walletdb/wasm/storage.rs | 4 +- .../z_coin/storage/z_params/indexeddb.rs | 6 +- mm2src/mm2_db/Cargo.toml | 1 + mm2src/mm2_db/src/indexed_db/db_lock.rs | 67 ++++++++++--------- .../src/account/storage/wasm_storage.rs | 4 +- mm2src/mm2_main/src/lp_ordermatch.rs | 4 +- mm2src/mm2_main/src/lp_swap.rs | 4 +- mm2src/mm2_main/src/lp_wallet.rs | 6 +- 18 files changed, 65 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d71fc532c8..ab5130fc2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4424,6 +4424,7 @@ dependencies = [ "rand 0.7.3", "serde", "serde_json", + "tokio", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test", diff --git a/mm2src/coins/hd_wallet/storage/wasm_storage.rs b/mm2src/coins/hd_wallet/storage/wasm_storage.rs index 1bff4dae23..f2e2e18335 100644 --- a/mm2src/coins/hd_wallet/storage/wasm_storage.rs +++ b/mm2src/coins/hd_wallet/storage/wasm_storage.rs @@ -21,7 +21,7 @@ const WALLET_ID_INDEX: &str = "wallet_id"; /// * account_id - HD account id const WALLET_ACCOUNT_ID_INDEX: &str = "wallet_account_id"; -type HDWalletDbLocked<'a> = DbLocked<'a, HDWalletDb>; +type HDWalletDbLocked = DbLocked; impl From for HDWalletStorageError { fn from(e: DbTransactionError) -> Self { @@ -270,7 +270,7 @@ impl HDWalletIndexedDbStorage { .or_mm_err(|| HDWalletStorageError::Internal("'HDWalletIndexedDbStorage::db' doesn't exist".to_owned())) } - async fn lock_db_mutex(db: &SharedDb) -> HDWalletStorageResult> { + async fn lock_db_mutex(db: &SharedDb) -> HDWalletStorageResult { // TODO: db_id db.get_or_initialize(None).await.mm_err(HDWalletStorageError::from) } diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 1518196c04..dec3afefb7 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -103,7 +103,7 @@ cfg_wasm32! { use hd_wallet::HDWalletDb; use mm2_db::indexed_db::{ConstructibleDb, DbLocked, SharedDb}; use tx_history_storage::wasm::{clear_tx_history, load_tx_history, save_tx_history, TxHistoryDb}; - pub type TxHistoryDbLocked<'a> = DbLocked<'a, TxHistoryDb>; + pub type TxHistoryDbLocked = DbLocked; } // using custom copy of try_fus as futures crate was renamed to futures01 @@ -3658,7 +3658,7 @@ impl CoinsContext { } #[cfg(target_arch = "wasm32")] - async fn tx_history_db(&self, db_id: Option<&str>) -> TxHistoryResult> { + async fn tx_history_db(&self, db_id: Option<&str>) -> TxHistoryResult { Ok(self.tx_history_db.get_or_initialize(db_id).await?) 
} } diff --git a/mm2src/coins/nft/nft_tests.rs b/mm2src/coins/nft/nft_tests.rs index 0784e1fffd..f9e5e3329d 100644 --- a/mm2src/coins/nft/nft_tests.rs +++ b/mm2src/coins/nft/nft_tests.rs @@ -187,7 +187,9 @@ cross_test!(test_last_nft_block, { assert_eq!(last_block, 28056726); }); -cross_test!(test_last_nft_block_multikey_db, { +cross_test!(test_last_nft_block_with_multikey_db_lock, { + #[cfg(target_arch = "wasm32")] + common::log::wasm_log::register_wasm_log(); let chain = Chain::Bsc; let nft_ctx = get_nft_ctx(&chain).await; let storage_1 = nft_ctx.lock_db(None).await.unwrap(); diff --git a/mm2src/coins/nft/storage/wasm/nft_idb.rs b/mm2src/coins/nft/storage/wasm/nft_idb.rs index 8e78be3460..ed9a957fab 100644 --- a/mm2src/coins/nft/storage/wasm/nft_idb.rs +++ b/mm2src/coins/nft/storage/wasm/nft_idb.rs @@ -9,7 +9,7 @@ const DB_VERSION: u32 = 1; /// /// This type ensures that while the database is being accessed or modified, /// no other operations can interfere, maintaining data integrity. -pub type NftCacheIDBLocked<'a> = DbLocked<'a, NftCacheIDB>; +pub type NftCacheIDBLocked = DbLocked; /// Represents the IndexedDB instance specifically designed for caching NFT data. /// diff --git a/mm2src/coins/nft/storage/wasm/wasm_storage.rs b/mm2src/coins/nft/storage/wasm/wasm_storage.rs index e5ea955918..1b31696051 100644 --- a/mm2src/coins/nft/storage/wasm/wasm_storage.rs +++ b/mm2src/coins/nft/storage/wasm/wasm_storage.rs @@ -130,7 +130,7 @@ impl NftTransferHistoryFilters { } #[async_trait] -impl NftListStorageOps for NftCacheIDBLocked<'_> { +impl NftListStorageOps for NftCacheIDBLocked { type Error = WasmNftCacheError; async fn init(&self, _chain: &Chain) -> MmResult<(), Self::Error> { Ok(()) } @@ -447,7 +447,7 @@ impl NftListStorageOps for NftCacheIDBLocked<'_> { } #[async_trait] -impl NftTransferHistoryStorageOps for NftCacheIDBLocked<'_> { +impl NftTransferHistoryStorageOps for NftCacheIDBLocked { type Error = WasmNftCacheError; async fn init(&self, _chain: &Chain) -> MmResult<(), Self::Error> { Ok(()) } diff --git a/mm2src/coins/tx_history_storage/wasm/tx_history_db.rs b/mm2src/coins/tx_history_storage/wasm/tx_history_db.rs index b646e7cefc..8602479f40 100644 --- a/mm2src/coins/tx_history_storage/wasm/tx_history_db.rs +++ b/mm2src/coins/tx_history_storage/wasm/tx_history_db.rs @@ -5,7 +5,7 @@ use mm2_db::indexed_db::{DbIdentifier, DbInstance, DbLocked, IndexedDb, IndexedD const DB_VERSION: u32 = 1; -pub type TxHistoryDbLocked<'a> = DbLocked<'a, TxHistoryDb>; +pub type TxHistoryDbLocked = DbLocked; pub struct TxHistoryDb { inner: IndexedDb, diff --git a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs index dbdc6fe89c..a70a48aa7a 100644 --- a/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs +++ b/mm2src/coins/tx_history_storage/wasm/tx_history_storage_v2.rs @@ -356,7 +356,7 @@ impl IndexedDbTxHistoryStorage { }) } - async fn lock_db(&self) -> WasmTxHistoryResult> { + async fn lock_db(&self) -> WasmTxHistoryResult { self.db.get_or_initialize(None).await.mm_err(WasmTxHistoryError::from) } } diff --git a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs index 18c8641223..22cd70e929 100644 --- a/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs +++ b/mm2src/coins/utxo/utxo_block_header_storage/wasm/indexeddb_block_header_storage.rs @@ -16,7 +16,7 @@ use 
std::collections::HashMap; const DB_VERSION: u32 = 1; pub type IDBBlockHeadersStorageRes = MmResult; -pub type IDBBlockHeadersInnerLocked<'a> = DbLocked<'a, IDBBlockHeadersInner>; +pub type IDBBlockHeadersInnerLocked = DbLocked; pub struct IDBBlockHeadersInner { pub inner: IndexedDb, @@ -56,7 +56,7 @@ impl IDBBlockHeadersStorage { } } - async fn lock_db(&self) -> IDBBlockHeadersStorageRes> { + async fn lock_db(&self) -> IDBBlockHeadersStorageRes { self.db .get_or_initialize(None) .await diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs index 56a57fc9fd..88dfa6c517 100644 --- a/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_idb_storage.rs @@ -18,7 +18,7 @@ use zcash_primitives::consensus::BlockHeight; const DB_NAME: &str = "z_compactblocks_cache"; const DB_VERSION: u32 = 1; -pub type BlockDbInnerLocked<'a> = DbLocked<'a, BlockDbInner>; +pub type BlockDbInnerLocked = DbLocked; #[derive(Clone, Debug, Deserialize, Serialize)] pub struct BlockDbTable { @@ -75,7 +75,7 @@ impl BlockDbImpl { }) } - async fn lock_db(&self) -> ZcoinStorageRes> { + async fn lock_db(&self) -> ZcoinStorageRes { self.db .get_or_initialize(self.db_id.as_deref()) .await diff --git a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs index a2e9684b98..e130f79754 100644 --- a/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs +++ b/mm2src/coins/z_coin/storage/walletdb/wasm/storage.rs @@ -38,7 +38,7 @@ use zcash_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; const DB_NAME: &str = "wallet_db_cache"; const DB_VERSION: u32 = 1; -pub type WalletDbInnerLocked<'a> = DbLocked<'a, WalletDbInner>; +pub type WalletDbInnerLocked = DbLocked; macro_rules! 
num_to_bigint { ($value: ident) => { @@ -150,7 +150,7 @@ impl<'a> WalletIndexedDb { Ok(db) } - pub(crate) async fn lock_db(&self) -> ZcoinStorageRes> { + pub(crate) async fn lock_db(&self) -> ZcoinStorageRes { self.db .get_or_initialize(self.db_id.as_deref()) .await diff --git a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs index a2b379ad0f..05ef81db1f 100644 --- a/mm2src/coins/z_coin/storage/z_params/indexeddb.rs +++ b/mm2src/coins/z_coin/storage/z_params/indexeddb.rs @@ -11,7 +11,7 @@ const DB_VERSION: u32 = 1; const TARGET_SPEND_CHUNKS: usize = 12; pub(crate) type ZcashParamsWasmRes = MmResult; -pub(crate) type ZcashParamsInnerLocked<'a> = DbLocked<'a, ZcashParamsWasmInner>; +pub(crate) type ZcashParamsInnerLocked = DbLocked; /// Since sapling_spend data way is greater than indexeddb max_data(267386880) bytes to save, we need to split /// sapling_spend and insert to db multiple times with index(sapling_spend_id) @@ -24,7 +24,7 @@ struct ZcashParamsWasmTable { } impl ZcashParamsWasmTable { - const SPEND_OUTPUT_INDEX: &str = "sapling_spend_sapling_output_index"; + const SPEND_OUTPUT_INDEX: &'static str = "sapling_spend_sapling_output_index"; } impl TableSignature for ZcashParamsWasmTable { @@ -73,7 +73,7 @@ impl ZcashParamsWasmImpl { Ok(Self(ConstructibleDb::new(ctx, db_id).into_shared())) } - async fn lock_db(&self) -> ZcashParamsWasmRes> { + async fn lock_db(&self) -> ZcashParamsWasmRes { self.0 .get_or_initialize(None) .await diff --git a/mm2src/mm2_db/Cargo.toml b/mm2src/mm2_db/Cargo.toml index 5f5374acad..b63752839b 100644 --- a/mm2src/mm2_db/Cargo.toml +++ b/mm2src/mm2_db/Cargo.toml @@ -24,6 +24,7 @@ primitives = { path = "../mm2_bitcoin/primitives" } rand = { version = "0.7", features = ["std", "small_rng", "wasm-bindgen"] } serde = "1" serde_json = { version = "1", features = ["preserve_order", "raw_value"] } +tokio = { version = "1.20", features = ["default", "sync"] } wasm-bindgen = "0.2.86" wasm-bindgen-futures = { version = "0.4.1" } wasm-bindgen-test = { version = "0.3.2" } diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index e9cdc3ec11..94f15f1f2f 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -1,20 +1,20 @@ use super::{DbIdentifier, DbInstance, InitDbResult}; -use futures::lock::{MappedMutexGuard as AsyncMappedMutexGuard, Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use mm2_core::{mm_ctx::MmArc, DbNamespaceId}; +use std::collections::HashMap; use std::sync::{Arc, Weak}; +use tokio::sync::{Mutex as AsyncMutex, OwnedMappedMutexGuard, OwnedMutexGuard}; /// The mapped mutex guard. /// This implements `Deref`. -pub type DbLocked<'a, Db> = AsyncMappedMutexGuard<'a, Option, Db>; +pub type DbLocked = OwnedMappedMutexGuard, Db>; pub type SharedDb = Arc>; pub type WeakDb = Weak>; pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the mutable reference. 
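
// Standalone sketch (not part of the patch) of the owned-guard pattern the db_lock hunk above
// switches to. With futures' MappedMutexGuard the guard borrowed the mutex, which forced the
// `'a` lifetime on `DbLocked<'a, Db>`; tokio's `Arc<Mutex<_>>` + `lock_owned()` +
// `OwnedMutexGuard::map` (the same calls `unwrap_db_instance` uses below) yields a guard that
// owns its Arc, so the alias can drop the lifetime. `FakeDb` and `lock_initialized` are
// illustrative names only.
use std::sync::Arc;
use tokio::sync::{Mutex as AsyncMutex, OwnedMappedMutexGuard, OwnedMutexGuard};

struct FakeDb;

type FakeDbLocked = OwnedMappedMutexGuard<Option<FakeDb>, FakeDb>;

async fn lock_initialized(slot: Arc<AsyncMutex<Option<FakeDb>>>) -> FakeDbLocked {
    // `lock_owned` consumes the Arc clone, so the returned guard is not tied to any borrow.
    let mut guard: OwnedMutexGuard<Option<FakeDb>> = slot.lock_owned().await;
    if guard.is_none() {
        *guard = Some(FakeDb); // lazy initialization, as `get_or_initialize` does for real DBs
    }
    OwnedMutexGuard::map(guard, |db| db.as_mut().expect("initialized above"))
}
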
- mutex: AsyncMutex>, + mutexes: Arc>>>>>, db_namespace: DbNamespaceId, - db_id: AsyncMutex>, - default_db_id: String, + ctx: MmArc, } impl ConstructibleDb { @@ -25,11 +25,13 @@ impl ConstructibleDb { pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { let rmd = hex::encode(ctx.rmd160().as_slice()); let db_id = db_id.unwrap_or(&rmd); + + let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); + ConstructibleDb { - mutex: AsyncMutex::new(None), + mutexes: Arc::new(AsyncMutex::new(conns)), db_namespace: ctx.db_namespace, - db_id: AsyncMutex::new(Some(db_id.to_string())), - default_db_id: rmd, + ctx: ctx.clone(), } } @@ -38,11 +40,11 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc) -> Self { let db_id = hex::encode(ctx.shared_db_id().as_slice()); + let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { - mutex: AsyncMutex::new(None), + mutexes: Arc::new(AsyncMutex::new(conns)), db_namespace: ctx.db_namespace, - db_id: AsyncMutex::new(Some(db_id.to_string())), - default_db_id: db_id, + ctx: ctx.clone(), } } @@ -50,39 +52,38 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_global_db(ctx: &MmArc) -> Self { ConstructibleDb { - mutex: AsyncMutex::new(None), + mutexes: Arc::new(AsyncMutex::new(HashMap::default())), db_namespace: ctx.db_namespace, - db_id: AsyncMutex::new(None), - default_db_id: ctx.rmd160_hex(), + ctx: ctx.clone(), } } /// Locks the given mutex and checks if the inner database is initialized already or not, /// initializes it if it's required, and returns the locked instance. - pub async fn get_or_initialize(&self, db_id: Option<&str>) -> InitDbResult> { - let mut locked_db = self.mutex.lock().await; - let locked_db_id = self.db_id.lock().await; + pub async fn get_or_initialize(&self, db_id: Option<&str>) -> InitDbResult> { + // TODO: caller might be calling for shared_db instead so handle default case, shouldn't be elf.ctx.rmd160_hex() but ctx.shared_db_id() instead + let db_id = db_id.map(|id| id.to_owned()).unwrap_or_else(|| self.ctx.rmd160_hex()); - // Check if the database is initialized and if the db_id matches - if let Some(current_db_id) = &*locked_db_id { - if locked_db.is_some() && (db_id.map(|id| id.to_string()) == Some(current_db_id.clone())) { - // If the database is initialized and the db_id matches, return the existing instance + let mut connections = self.mutexes.lock().await; + if let Some(connection) = connections.get_mut(&db_id) { + let mut locked_db = connection.clone().lock_owned().await; + // check and return found connection if already initialized. + if &*locked_db.is_some() { return Ok(unwrap_db_instance(locked_db)); - } - } + }; - // Check if there is already an initialized database instance (`locked_db`) - // and if no specific db_id is provided. It then verifies whether - // the current db_id matches the default default_db_id. - // If these conditions are met, the function returns the existing database instance. - if locked_db.is_some() && db_id.is_none() && Some(self.default_db_id.as_str()) == locked_db_id.as_deref() { + // existing connection found but not initialized, hence, we initialize and return this connection. 
+ let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; + *locked_db = Some(db); return Ok(unwrap_db_instance(locked_db)); } - // Initialize the new DB instance as the db_id is different or no DB was initialized before - let db = Db::init(DbIdentifier::new::(self.db_namespace, locked_db_id.clone())).await?; - *locked_db = Some(db); + // No connection found so we create a new connection with immediate initialization + let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; + let db = Arc::new(AsyncMutex::new(Some(db))); + connections.insert(db_id, db.clone()); + let locked_db = db.lock_owned().await; Ok(unwrap_db_instance(locked_db)) } } @@ -90,8 +91,8 @@ impl ConstructibleDb { /// # Panics /// /// This function will `panic!()` if the inner value of the `guard` is `None`. -fn unwrap_db_instance(guard: AsyncMutexGuard<'_, Option>) -> DbLocked<'_, Db> { - AsyncMutexGuard::map(guard, |wrapped_db| { +fn unwrap_db_instance(guard: OwnedMutexGuard>) -> DbLocked { + OwnedMutexGuard::map(guard, |wrapped_db| { wrapped_db .as_mut() .expect("The locked 'Option' must contain a value") diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index 13853e25d3..4e26350a66 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -13,7 +13,7 @@ use std::collections::{BTreeMap, BTreeSet}; const DB_VERSION: u32 = 1; -type AccountDbLocked<'a> = DbLocked<'a, AccountDb>; +type AccountDbLocked = DbLocked; impl From for AccountStorageError { fn from(e: DbTransactionError) -> Self { @@ -67,7 +67,7 @@ impl WasmAccountStorage { } } - async fn lock_db_mutex(&self) -> AccountStorageResult> { + async fn lock_db_mutex(&self) -> AccountStorageResult { self.account_db .get_or_initialize(None) .await diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 975fad2101..0b4681315e 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -92,7 +92,7 @@ cfg_wasm32! { use mm2_db::indexed_db::{ConstructibleDb, DbLocked}; use ordermatch_wasm_db::{InitDbResult, OrdermatchDb}; - pub type OrdermatchDbLocked<'a> = DbLocked<'a, OrdermatchDb>; + pub type OrdermatchDbLocked = DbLocked; } #[path = "lp_ordermatch/best_orders.rs"] mod best_orders; @@ -2828,7 +2828,7 @@ impl OrdermatchContext { } #[cfg(target_arch = "wasm32")] - pub async fn ordermatch_db(&self, db_id: Option<&str>) -> InitDbResult> { + pub async fn ordermatch_db(&self, db_id: Option<&str>) -> InitDbResult { self.ordermatch_db.get_or_initialize(db_id).await } } diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 020f6fb1b2..e310fae228 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -167,7 +167,7 @@ cfg_wasm32! 
{ use saved_swap::migrate_swaps_data; use swap_wasm_db::{InitDbResult, InitDbError, SwapDb}; - pub type SwapDbLocked<'a> = DbLocked<'a, SwapDb>; + pub type SwapDbLocked = DbLocked; } #[derive(Clone, Debug, Eq, Deserialize, PartialEq, Serialize)] @@ -566,7 +566,7 @@ impl SwapsContext { pub fn remove_msg_v2_store(&self, uuid: &Uuid) { self.swap_v2_msgs.lock().unwrap().remove(uuid); } #[cfg(target_arch = "wasm32")] - pub async fn swap_db(&self, db_id: Option<&str>) -> InitDbResult> { + pub async fn swap_db(&self, db_id: Option<&str>) -> InitDbResult { self.swap_db.get_or_initialize(db_id).await } } diff --git a/mm2src/mm2_main/src/lp_wallet.rs b/mm2src/mm2_main/src/lp_wallet.rs index db282bb0c5..68e00a52c9 100644 --- a/mm2src/mm2_main/src/lp_wallet.rs +++ b/mm2src/mm2_main/src/lp_wallet.rs @@ -14,7 +14,7 @@ cfg_wasm32! { use mnemonics_wasm_db::{read_encrypted_passphrase_if_available, save_encrypted_passphrase}; use std::sync::Arc; - type WalletsDbLocked<'a> = DbLocked<'a, WalletsDb>; + type WalletsDbLocked = DbLocked; } cfg_native! { @@ -97,9 +97,7 @@ impl WalletsContext { } // TODO - pub async fn wallets_db(&self) -> InitDbResult> { - self.wallets_db.get_or_initialize(None).await - } + pub async fn wallets_db(&self) -> InitDbResult { self.wallets_db.get_or_initialize(None).await } } // Utility function for deserialization to reduce repetition From fdb4f550a961768fd29cd4e4417b8db2947e0ce0 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 11 May 2024 18:44:57 +0100 Subject: [PATCH 105/186] fix hd_wallet --- mm2src/coins/hd_wallet/storage/mod.rs | 14 ++++++++--- .../coins/hd_wallet/storage/wasm_storage.rs | 25 +++++++++++-------- mm2src/mm2_core/src/sql_connection_pool.rs | 13 ++++++++++ mm2src/mm2_db/src/indexed_db/db_lock.rs | 21 +++++++++++++--- .../src/account/storage/wasm_storage.rs | 2 +- 5 files changed, 56 insertions(+), 19 deletions(-) diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index 6afacf0aed..f994b35f06 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -220,13 +220,13 @@ impl Default for HDWalletCoinStorage { } impl HDWalletCoinStorage { + // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init(ctx: &MmArc, coin: String) -> HDWalletStorageResult { let crypto_ctx = CryptoCtx::from_ctx(ctx)?; let hd_wallet_rmd160 = crypto_ctx .hw_wallet_rmd160() .or_mm_err(|| HDWalletStorageError::HDWalletUnavailable)?; - let db_id = hex::encode(hd_wallet_rmd160.as_slice()); - let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); + let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -234,13 +234,13 @@ impl HDWalletCoinStorage { }) } + // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? 
or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init_with_rmd160( ctx: &MmArc, coin: String, hd_wallet_rmd160: H160, ) -> HDWalletStorageResult { - let db_id = hex::encode(hd_wallet_rmd160.as_slice()); - let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); + let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -347,12 +347,18 @@ mod tests { let rick_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); + let rick_device1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160) .await .expect("!HDWalletCoinStorage::new"); + let morty_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "MORTY".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); + println!( + "morty_device0_db {}", + hex::encode(morty_device0_db.hd_wallet_rmd160.as_slice()) + ); rick_device0_db .upload_new_account(rick_device0_account0.clone()) diff --git a/mm2src/coins/hd_wallet/storage/wasm_storage.rs b/mm2src/coins/hd_wallet/storage/wasm_storage.rs index f2e2e18335..204e29bec2 100644 --- a/mm2src/coins/hd_wallet/storage/wasm_storage.rs +++ b/mm2src/coins/hd_wallet/storage/wasm_storage.rs @@ -156,22 +156,26 @@ impl DbInstance for HDWalletDb { /// The wrapper over the [`CoinsContext::hd_wallet_db`] weak pointer. pub(super) struct HDWalletIndexedDbStorage { db: WeakDb, + db_id: Option, } #[async_trait] impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { - async fn init(ctx: &MmArc, _db_id: Option<&str>) -> HDWalletStorageResult + async fn init(ctx: &MmArc, db_id: Option<&str>) -> HDWalletStorageResult where Self: Sized, { let coins_ctx = CoinsContext::from_ctx(ctx).map_to_mm(HDWalletStorageError::Internal)?; let db = SharedDb::downgrade(&coins_ctx.hd_wallet_db); - Ok(HDWalletIndexedDbStorage { db }) + Ok(HDWalletIndexedDbStorage { + db, + db_id: db_id.map(|e| e.to_string()), + }) } async fn load_accounts(&self, wallet_id: HDWalletId) -> HDWalletStorageResult> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db).await?; + let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -193,7 +197,7 @@ impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { account_id: u32, ) -> HDWalletStorageResult> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db).await?; + let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -235,7 +239,7 @@ impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { account: HDAccountStorageItem, ) -> HDWalletStorageResult<()> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db).await?; + let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -250,7 +254,7 @@ impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { async fn clear_accounts(&self, wallet_id: HDWalletId) -> HDWalletStorageResult<()> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db).await?; + let locked_db = 
Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -270,9 +274,10 @@ impl HDWalletIndexedDbStorage { .or_mm_err(|| HDWalletStorageError::Internal("'HDWalletIndexedDbStorage::db' doesn't exist".to_owned())) } - async fn lock_db_mutex(db: &SharedDb) -> HDWalletStorageResult { - // TODO: db_id - db.get_or_initialize(None).await.mm_err(HDWalletStorageError::from) + async fn lock_db_mutex(db: &SharedDb, db_id: Option<&str>) -> HDWalletStorageResult { + db.get_or_initialize_shared(db_id) + .await + .mm_err(HDWalletStorageError::from) } async fn find_account( @@ -295,7 +300,7 @@ impl HDWalletIndexedDbStorage { F: FnOnce(&mut HDAccountTable), { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db).await?; + let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 402440054a..71ef9331a7 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -118,6 +118,7 @@ impl SqliteConnPool { } /// Opens a database connection based on the database ID and connection kind. + #[cfg(all(test))] fn open_connection(ctx: &MmCtx, db_id: &str, db_id_conn_kind: &DbIdConnKind) -> Arc> { let sqlite_file_path = match db_id_conn_kind { DbIdConnKind::Shared => ctx.shared_dbdir(Some(db_id)).join("MM2-shared.db"), @@ -129,6 +130,18 @@ impl SqliteConnPool { Connection::open(sqlite_file_path).expect("failed to open db"), )) } + + /// Opens a database connection based on the database ID and connection kind. + #[cfg(not(test))] + fn open_connection(ctx: &MmCtx, db_id: &str, db_id_conn_kind: &DbIdConnKind) -> Arc> { + let sqlite_file_path = match db_id_conn_kind { + DbIdConnKind::Shared => ctx.shared_dbdir(Some(db_id)).join("MM2-shared.db"), + DbIdConnKind::Single => ctx.dbdir(Some(db_id)).join(SYNC_SQLITE_DB_ID), + }; + + log_sqlite_file_open_attempt(&sqlite_file_path); + Arc::new(Mutex::new(Connection::open_in_memory().expect("failed to open db"))) + } } /// A pool for managing async SQLite connections, where each connection is keyed by a unique string identifier. diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 94f15f1f2f..c57ca532f1 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -58,17 +58,30 @@ impl ConstructibleDb { } } + // handle to get or initialize db + pub async fn get_or_initialize(&self, db_id: Option<&str>) -> InitDbResult> { + self.get_or_initialize_impl(db_id, false).await + } + + // handle to get or initialize shared db + pub async fn get_or_initialize_shared(&self, db_id: Option<&str>) -> InitDbResult> { + self.get_or_initialize_impl(db_id, true).await + } + /// Locks the given mutex and checks if the inner database is initialized already or not, /// initializes it if it's required, and returns the locked instance. 
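
// Minimal sketch (illustrative names) of the id-resolution rule behind the
// `get_or_initialize` / `get_or_initialize_shared` pair added in this hunk: an explicitly
// passed db_id always wins; otherwise shared storages fall back to the wallet's shared_db_id
// and per-account storages to the rmd160-derived wallet id.
fn resolve_db_id<'a>(
    explicit: Option<&'a str>,
    is_shared: bool,
    wallet_db_id: &'a str,  // hex(rmd160) of the active key pair
    shared_db_id: &'a str,  // hex(shared_db_id), shared by all accounts of one seed
) -> &'a str {
    explicit.unwrap_or(if is_shared { shared_db_id } else { wallet_db_id })
}
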
- pub async fn get_or_initialize(&self, db_id: Option<&str>) -> InitDbResult> { - // TODO: caller might be calling for shared_db instead so handle default case, shouldn't be elf.ctx.rmd160_hex() but ctx.shared_db_id() instead - let db_id = db_id.map(|id| id.to_owned()).unwrap_or_else(|| self.ctx.rmd160_hex()); + async fn get_or_initialize_impl(&self, db_id: Option<&str>, is_shared: bool) -> InitDbResult> { + let default_db_id = match is_shared { + true => hex::encode(self.ctx.shared_db_id().as_slice()), + false => self.ctx.rmd160_hex(), + }; + let db_id = db_id.map(|id| id.to_owned()).unwrap_or_else(|| default_db_id); let mut connections = self.mutexes.lock().await; if let Some(connection) = connections.get_mut(&db_id) { let mut locked_db = connection.clone().lock_owned().await; // check and return found connection if already initialized. - if &*locked_db.is_some() { + if locked_db.is_some() { return Ok(unwrap_db_instance(locked_db)); }; diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index 4e26350a66..3be805723b 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -69,7 +69,7 @@ impl WasmAccountStorage { async fn lock_db_mutex(&self) -> AccountStorageResult { self.account_db - .get_or_initialize(None) + .get_or_initialize_shared(None) .await .mm_err(AccountStorageError::from) } From 6c93752a4f14145eb20fc62a4d9721b4192a2cdc Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sat, 11 May 2024 23:03:52 +0100 Subject: [PATCH 106/186] fix wasm clippy 0 --- mm2src/mm2_db/src/indexed_db/db_lock.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index c57ca532f1..b4e19ed6bb 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -10,6 +10,7 @@ pub type DbLocked = OwnedMappedMutexGuard, Db>; pub type SharedDb = Arc>; pub type WeakDb = Weak>; +#[allow(clippy::type_complexity)] pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the mutable reference. mutexes: Arc>>>>>, @@ -40,7 +41,7 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc) -> Self { let db_id = hex::encode(ctx.shared_db_id().as_slice()); - let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); + let conns = HashMap::from([(db_id, Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { mutexes: Arc::new(AsyncMutex::new(conns)), db_namespace: ctx.db_namespace, From db397e5f6e3cdc6cb7494bb101c850c709bd5248 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sun, 12 May 2024 04:47:39 +0100 Subject: [PATCH 107/186] fix wasm clippy --- mm2src/mm2_db/src/indexed_db/db_lock.rs | 33 ++++++++++++++-------- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 34 +++++++++++++---------- 2 files changed, 40 insertions(+), 27 deletions(-) diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index b4e19ed6bb..32c7933f7b 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -15,7 +15,10 @@ pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the mutable reference. 
mutexes: Arc>>>>>, db_namespace: DbNamespaceId, - ctx: MmArc, + // Default mm2 d_id derive from passphrase rmd160 + db_id: String, + // Default mm2 shared_db_id derive from passphrase + shared_db_id: String, } impl ConstructibleDb { @@ -24,15 +27,17 @@ impl ConstructibleDb { /// Creates a new uninitialized `Db` instance from other Iguana and/or HD accounts. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { - let rmd = hex::encode(ctx.rmd160().as_slice()); - let db_id = db_id.unwrap_or(&rmd); + let db_id_ = hex::encode(ctx.rmd160().as_slice()); + let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); + let db_id = db_id.unwrap_or(&db_id_); let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { mutexes: Arc::new(AsyncMutex::new(conns)), db_namespace: ctx.db_namespace, - ctx: ctx.clone(), + db_id: db_id.to_string(), + shared_db_id, } } @@ -40,22 +45,27 @@ impl ConstructibleDb { /// derived from the same passphrase. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc) -> Self { - let db_id = hex::encode(ctx.shared_db_id().as_slice()); - let conns = HashMap::from([(db_id, Arc::new(AsyncMutex::new(None)))]); + let db_id = hex::encode(ctx.rmd160().as_slice()); + let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); + let conns = HashMap::from([(shared_db_id.clone(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { mutexes: Arc::new(AsyncMutex::new(conns)), db_namespace: ctx.db_namespace, - ctx: ctx.clone(), + db_id, + shared_db_id, } } /// Creates a new uninitialized `Db` instance shared between all wallets/seed. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_global_db(ctx: &MmArc) -> Self { + let db_id = hex::encode(ctx.rmd160().as_slice()); + let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); ConstructibleDb { mutexes: Arc::new(AsyncMutex::new(HashMap::default())), db_namespace: ctx.db_namespace, - ctx: ctx.clone(), + db_id, + shared_db_id, } } @@ -72,11 +82,10 @@ impl ConstructibleDb { /// Locks the given mutex and checks if the inner database is initialized already or not, /// initializes it if it's required, and returns the locked instance. 
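
// Generic sketch (illustrative only) of the keyed-slot layout these constructors now build:
// one `Arc<AsyncMutex<Option<Db>>>` per account id instead of a single global slot, so
// several wallets can hold independent IndexedDB connections at the same time.
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex as AsyncMutex;

struct SlotMap<Db> {
    slots: AsyncMutex<HashMap<String, Arc<AsyncMutex<Option<Db>>>>>,
}

impl<Db> SlotMap<Db> {
    // Seed the map with an empty slot for the wallet's own id, mirroring `ConstructibleDb::new`;
    // other ids get their slots lazily on first lookup.
    fn new(initial_db_id: &str) -> Self {
        let slots = HashMap::from([(initial_db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]);
        SlotMap {
            slots: AsyncMutex::new(slots),
        }
    }
}
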
async fn get_or_initialize_impl(&self, db_id: Option<&str>, is_shared: bool) -> InitDbResult> { - let default_db_id = match is_shared { - true => hex::encode(self.ctx.shared_db_id().as_slice()), - false => self.ctx.rmd160_hex(), + let db_id = match is_shared { + true => db_id.map(|id| id.to_owned()).unwrap_or_else(|| self.shared_db_id.to_owned()), + false => db_id.map(|id| id.to_owned()).unwrap_or_else(|| self.db_id.to_owned()), }; - let db_id = db_id.map(|id| id.to_owned()).unwrap_or_else(|| default_db_id); let mut connections = self.mutexes.lock().await; if let Some(connection) = connections.get_mut(&db_id) { diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index cefd412b38..c725fcdc11 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -112,11 +112,11 @@ impl SavedSwap { SavedSwap::Maker(saved) => { let (maker_swap, _) = try_s!(MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, saved)); Ok(try_s!(maker_swap.recover_funds().await)) - }, + } SavedSwap::Taker(saved) => { let (taker_swap, _) = try_s!(TakerSwap::load_from_saved(ctx, maker_coin, taker_coin, saved).await); Ok(try_s!(taker_swap.recover_funds().await)) - }, + } } } @@ -136,14 +136,14 @@ impl SavedSwap { data.p2p_privkey = None; } } - }, + } SavedSwap::Taker(swap) => { if let Some(ref mut event) = swap.events.first_mut() { if let TakerSwapEvent::Started(ref mut data) = event.event { data.p2p_privkey = None; } } - }, + } }; } @@ -214,7 +214,7 @@ mod native_impl { FsJsonError::Serializing(serializing) => SavedSwapError::ErrorSerializing(serializing.to_string()), FsJsonError::Deserializing(deserializing) => { SavedSwapError::ErrorDeserializing(deserializing.to_string()) - }, + } } } } @@ -281,11 +281,11 @@ mod native_impl { SavedSwap::Maker(maker) => { let path = stats_maker_swap_file_path(ctx, db_id, &maker.uuid); write_json(self, &path, USE_TMP_FILE).await?; - }, + } SavedSwap::Taker(taker) => { let path = stats_taker_swap_file_path(ctx, db_id, &taker.uuid); write_json(self, &path, USE_TMP_FILE).await?; - }, + } } Ok(()) } @@ -350,20 +350,20 @@ mod wasm_impl { None => { warn!("No MySwapsFiltersTable for {}", swap.uuid()); continue; - }, + } }; filter_record.swap_type = LEGACY_SWAP_TYPE; filter_record.is_finished = swap.is_finished().into(); filters_table.replace_item(filter_id, &filter_record).await?; } - }, + } 1 => break, unsupported => { return MmError::err(SavedSwapError::InternalError(format!( "Unsupported migration {}", unsupported - ))) - }, + ))); + } } migration += 1; migration_table.add_item(&SwapsMigrationTable { migration }).await?; @@ -394,10 +394,10 @@ mod wasm_impl { DbTransactionError::ErrorSerializingItem(_) => SavedSwapError::ErrorSerializing(desc), DbTransactionError::ErrorGettingItems(_) | DbTransactionError::ErrorCountingItems(_) => { SavedSwapError::ErrorLoading(desc) - }, + } DbTransactionError::ErrorUploadingItem(_) | DbTransactionError::ErrorDeletingItems(_) => { SavedSwapError::ErrorSaving(desc) - }, + } } } } @@ -420,8 +420,12 @@ mod wasm_impl { let table = transaction.table::().await?; let saved_swap_json = match table.get_item_by_unique_index("uuid", uuid).await? { - Some((_item_id, SavedSwapTable { saved_swap, .. })) => saved_swap, - None => return Ok(None), + Some((_item_id, SavedSwapTable { saved_swap, .. 
})) => { + saved_swap + } + None => return { + Ok(None) + }, }; json::from_value(saved_swap_json).map_to_mm(|e| SavedSwapError::ErrorDeserializing(e.to_string())) From 393e825221e059e9925bf03bfa235ce7fdede599 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sun, 12 May 2024 04:49:23 +0100 Subject: [PATCH 108/186] cargo fmt --- mm2src/mm2_db/src/indexed_db/db_lock.rs | 4 ++- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 32 ++++++++++------------- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 32c7933f7b..bb9c04a6f4 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -83,7 +83,9 @@ impl ConstructibleDb { /// initializes it if it's required, and returns the locked instance. async fn get_or_initialize_impl(&self, db_id: Option<&str>, is_shared: bool) -> InitDbResult> { let db_id = match is_shared { - true => db_id.map(|id| id.to_owned()).unwrap_or_else(|| self.shared_db_id.to_owned()), + true => db_id + .map(|id| id.to_owned()) + .unwrap_or_else(|| self.shared_db_id.to_owned()), false => db_id.map(|id| id.to_owned()).unwrap_or_else(|| self.db_id.to_owned()), }; diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index c725fcdc11..5bfe299ca3 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -112,11 +112,11 @@ impl SavedSwap { SavedSwap::Maker(saved) => { let (maker_swap, _) = try_s!(MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, saved)); Ok(try_s!(maker_swap.recover_funds().await)) - } + }, SavedSwap::Taker(saved) => { let (taker_swap, _) = try_s!(TakerSwap::load_from_saved(ctx, maker_coin, taker_coin, saved).await); Ok(try_s!(taker_swap.recover_funds().await)) - } + }, } } @@ -136,14 +136,14 @@ impl SavedSwap { data.p2p_privkey = None; } } - } + }, SavedSwap::Taker(swap) => { if let Some(ref mut event) = swap.events.first_mut() { if let TakerSwapEvent::Started(ref mut data) = event.event { data.p2p_privkey = None; } } - } + }, }; } @@ -214,7 +214,7 @@ mod native_impl { FsJsonError::Serializing(serializing) => SavedSwapError::ErrorSerializing(serializing.to_string()), FsJsonError::Deserializing(deserializing) => { SavedSwapError::ErrorDeserializing(deserializing.to_string()) - } + }, } } } @@ -281,11 +281,11 @@ mod native_impl { SavedSwap::Maker(maker) => { let path = stats_maker_swap_file_path(ctx, db_id, &maker.uuid); write_json(self, &path, USE_TMP_FILE).await?; - } + }, SavedSwap::Taker(taker) => { let path = stats_taker_swap_file_path(ctx, db_id, &taker.uuid); write_json(self, &path, USE_TMP_FILE).await?; - } + }, } Ok(()) } @@ -350,20 +350,20 @@ mod wasm_impl { None => { warn!("No MySwapsFiltersTable for {}", swap.uuid()); continue; - } + }, }; filter_record.swap_type = LEGACY_SWAP_TYPE; filter_record.is_finished = swap.is_finished().into(); filters_table.replace_item(filter_id, &filter_record).await?; } - } + }, 1 => break, unsupported => { return MmError::err(SavedSwapError::InternalError(format!( "Unsupported migration {}", unsupported ))); - } + }, } migration += 1; migration_table.add_item(&SwapsMigrationTable { migration }).await?; @@ -394,10 +394,10 @@ mod wasm_impl { DbTransactionError::ErrorSerializingItem(_) => SavedSwapError::ErrorSerializing(desc), DbTransactionError::ErrorGettingItems(_) | DbTransactionError::ErrorCountingItems(_) => { SavedSwapError::ErrorLoading(desc) - } + }, 
DbTransactionError::ErrorUploadingItem(_) | DbTransactionError::ErrorDeletingItems(_) => { SavedSwapError::ErrorSaving(desc) - } + }, } } } @@ -420,12 +420,8 @@ mod wasm_impl { let table = transaction.table::().await?; let saved_swap_json = match table.get_item_by_unique_index("uuid", uuid).await? { - Some((_item_id, SavedSwapTable { saved_swap, .. })) => { - saved_swap - } - None => return { - Ok(None) - }, + Some((_item_id, SavedSwapTable { saved_swap, .. })) => saved_swap, + None => return { Ok(None) }, }; json::from_value(saved_swap_json).map_to_mm(|e| SavedSwapError::ErrorDeserializing(e.to_string())) From 6e515ae1055169aa8d36c667add016c318fd711e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Sun, 12 May 2024 04:58:27 +0100 Subject: [PATCH 109/186] cargo fmt --- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 5bfe299ca3..d973b0808c 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -421,7 +421,7 @@ mod wasm_impl { let saved_swap_json = match table.get_item_by_unique_index("uuid", uuid).await? { Some((_item_id, SavedSwapTable { saved_swap, .. })) => saved_swap, - None => return { Ok(None) }, + None => return Ok(None), }; json::from_value(saved_swap_json).map_to_mm(|e| SavedSwapError::ErrorDeserializing(e.to_string())) From b036c62ee6cf5c7cfd5f8025cbc3fb48294426a9 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 14 May 2024 05:33:45 +0100 Subject: [PATCH 110/186] implement account db_id for tendermint coin --- mm2src/coins/lp_coins.rs | 4 ++-- mm2src/coins/tendermint/tendermint_coin.rs | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index daea16fc61..53e37188f6 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3130,7 +3130,7 @@ pub trait MmCoin: // BCH cash address format has colon after prefix, e.g. bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - ctx.dbdir(None) + ctx.dbdir(self.account_db_id().as_deref()) .join("TRANSACTIONS") .join(format!("{}_{}.json", self.ticker(), my_address)) } @@ -3142,7 +3142,7 @@ pub trait MmCoin: // BCH cash address format has colon after prefix, e.g. 
bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - ctx.dbdir(None) + ctx.dbdir(self.account_db_id().as_deref()) .join("TRANSACTIONS") .join(format!("{}_{}_migration", self.ticker(), my_address)) } diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 9ea6bcdaaa..f8ca50cb76 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2286,6 +2286,17 @@ impl MmCoin for TendermintCoin { fn on_disabled(&self) -> Result<(), AbortedError> { AbortableSystem::abort_all(&self.abortable_system) } fn on_token_deactivated(&self, _ticker: &str) {} + + fn account_db_id(&self) -> Option { + if let Ok(public_key) = self.activation_policy.public_key() { + let address_hash = dhash160(&public_key.to_bytes()); + let address_rmd160_hex = hex::encode(address_hash.as_slice()); + + return Some(address_rmd160_hex); + }; + + None + } } #[async_trait] From 9dcb662429d118a17196616e988cfc50876426b8 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 14 May 2024 06:56:01 +0100 Subject: [PATCH 111/186] implement account db_id for tendermint coin --- mm2src/coins/hd_wallet/storage/mod.rs | 4 ---- mm2src/coins/lightning/ln_utils.rs | 4 +++- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index f994b35f06..9304da7234 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -355,10 +355,6 @@ mod tests { let morty_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "MORTY".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); - println!( - "morty_device0_db {}", - hex::encode(morty_device0_db.hd_wallet_rmd160.as_slice()) - ); rick_device0_db .upload_new_account(rick_device0_account0.clone()) diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 530337f989..3420bfa30f 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -3,6 +3,7 @@ use crate::lightning::ln_db::LightningDB; use crate::lightning::ln_platform::{get_best_header, ln_best_block_update_loop, update_best_block}; use crate::lightning::ln_sql::SqliteLightningDB; use crate::lightning::ln_storage::{LightningStorage, NodesAddressesMap}; +use crate::lp_coinfind_any; use crate::utxo::rpc_clients::BestBlock as RpcBestBlock; use bitcoin::hash_types::BlockHash; use bitcoin_hashes::{sha256d, Hash}; @@ -69,7 +70,8 @@ pub async fn init_persister( } pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult { - // TODO db_id + let ticker_to_mm_coin = lp_coinfind_any(ctx, &ticker) + .map_to_mm(|err| EnableLightningError::InvalidRequest(format!("{ticker} is not activated yet!")))?; let shared = ctx.sqlite_conn_opt(None).or_mm_err(|| { EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) })?; From 0ac682d31d1a43548687df02737015dbb37ac187 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 14 May 2024 07:11:38 +0100 Subject: [PATCH 112/186] use coin db_id in init_db and a minor change to account_db_ids loop --- mm2src/coins/lightning/ln_utils.rs | 14 ++++++++++---- mm2src/coins/lp_coins.rs | 3 +-- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 3420bfa30f..96666daf2d 100644 --- a/mm2src/coins/lightning/ln_utils.rs 
+++ b/mm2src/coins/lightning/ln_utils.rs @@ -71,10 +71,16 @@ pub async fn init_persister( pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult { let ticker_to_mm_coin = lp_coinfind_any(ctx, &ticker) - .map_to_mm(|err| EnableLightningError::InvalidRequest(format!("{ticker} is not activated yet!")))?; - let shared = ctx.sqlite_conn_opt(None).or_mm_err(|| { - EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) - })?; + .await + .map_to_mm(|err| { + EnableLightningError::InvalidRequest(format!("{ticker} not found or is not activated yet! err=({err})")) + })? + .ok_or_else(|| EnableLightningError::InvalidRequest(format!("{ticker} not found or is not activated yet!")))?; + let shared = ctx + .sqlite_conn_opt(ticker_to_mm_coin.inner.account_db_id().as_deref()) + .or_mm_err(|| { + EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) + })?; let db = SqliteLightningDB::new(ticker, shared); diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 53e37188f6..ed39def503 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4497,9 +4497,8 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result>(); - for coin in coins.iter() { + for coin in coins.values() { if let Some(account) = coin.inner.account_db_id() { if active_only && coin.is_available() { account_ids.insert(account.clone()); From c409eccfbc22dfcf3e2c7253595b994cb7a0ec65 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 14 May 2024 07:14:17 +0100 Subject: [PATCH 113/186] cleanup test mod in hd_wallet/storage/mod --- mm2src/coins/hd_wallet/storage/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index 9304da7234..5fa96b9a42 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -347,11 +347,9 @@ mod tests { let rick_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); - let rick_device1_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device1_rmd160) .await .expect("!HDWalletCoinStorage::new"); - let morty_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "MORTY".to_owned(), device0_rmd160) .await .expect("!HDWalletCoinStorage::new"); @@ -384,7 +382,7 @@ mod tests { rick_device0_account0.clone(), rick_device0_account1.clone(), rick_device1_account0.clone(), - morty_device0_account0.clone(), + morty_device0_account0.clone() ]); let mut actual = rick_device0_db From 23bb80384f1c585d2e3702e82f503b53f3e0ec1e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 14 May 2024 14:38:07 +0100 Subject: [PATCH 114/186] drop connections as early as possible --- mm2src/mm2_db/src/indexed_db/db_lock.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index bb9c04a6f4..05eb4e0e85 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -82,16 +82,14 @@ impl ConstructibleDb { /// Locks the given mutex and checks if the inner database is initialized already or not, /// initializes it if it's required, and returns the locked instance. 
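
// Sketch of the account-id derivation used by the `TendermintCoin::account_db_id` impl added
// above (and consumed by `find_unique_account_ids`): a hash160-style digest, RIPEMD160 over
// SHA256 of the account public key, hex-encoded. The repo uses its own `dhash160` helper; the
// public `sha2`/`ripemd` crates appear here purely for illustration.
use ripemd::Ripemd160;
use sha2::{Digest, Sha256};

fn account_db_id_from_pubkey(pubkey_bytes: &[u8]) -> String {
    let sha = Sha256::digest(pubkey_bytes);
    let rmd160 = Ripemd160::digest(sha);
    hex::encode(rmd160)
}
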
async fn get_or_initialize_impl(&self, db_id: Option<&str>, is_shared: bool) -> InitDbResult> { - let db_id = match is_shared { - true => db_id - .map(|id| id.to_owned()) - .unwrap_or_else(|| self.shared_db_id.to_owned()), - false => db_id.map(|id| id.to_owned()).unwrap_or_else(|| self.db_id.to_owned()), - }; + let default_id = if is_shared { &self.shared_db_id } else { &self.db_id }; + let db_id = db_id.unwrap_or(default_id).to_owned(); let mut connections = self.mutexes.lock().await; if let Some(connection) = connections.get_mut(&db_id) { let mut locked_db = connection.clone().lock_owned().await; + // Drop connections lock as soon as possible. + drop(connections); // check and return found connection if already initialized. if locked_db.is_some() { return Ok(unwrap_db_instance(locked_db)); @@ -107,6 +105,8 @@ impl ConstructibleDb { let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; let db = Arc::new(AsyncMutex::new(Some(db))); connections.insert(db_id, db.clone()); + // Drop connections lock as soon as possible. + drop(connections); let locked_db = db.lock_owned().await; Ok(unwrap_db_instance(locked_db)) From 828160912a530d73482df92b4976cc9e63db90e4 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 15 May 2024 09:55:44 +0100 Subject: [PATCH 115/186] refactor db_lock and substitute Mutex for RwLock --- mm2src/mm2_db/src/indexed_db/db_lock.rs | 28 ++++++++++++------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 05eb4e0e85..ffc4fff5a1 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -2,18 +2,18 @@ use super::{DbIdentifier, DbInstance, InitDbResult}; use mm2_core::{mm_ctx::MmArc, DbNamespaceId}; use std::collections::HashMap; use std::sync::{Arc, Weak}; -use tokio::sync::{Mutex as AsyncMutex, OwnedMappedMutexGuard, OwnedMutexGuard}; +use tokio::sync::{OwnedRwLockMappedWriteGuard, OwnedRwLockWriteGuard, RwLock}; /// The mapped mutex guard. /// This implements `Deref`. -pub type DbLocked = OwnedMappedMutexGuard, Db>; +pub type DbLocked = OwnedRwLockMappedWriteGuard, Db>; pub type SharedDb = Arc>; pub type WeakDb = Weak>; #[allow(clippy::type_complexity)] pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the mutable reference. 
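
// Sketch of the lock-scoping rule that the "drop connections as early as possible" change
// above enforces: hold the map-wide lock only long enough to clone the per-id slot, then
// release it before awaiting the (potentially slow) database initialization so other ids are
// not blocked behind it. Types mirror the SlotMap sketch earlier; names are illustrative.
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex as AsyncMutex;

async fn slot_for<Db>(
    slots: &AsyncMutex<HashMap<String, Arc<AsyncMutex<Option<Db>>>>>,
    db_id: &str,
) -> Arc<AsyncMutex<Option<Db>>> {
    let mut map = slots.lock().await; // short critical section over the whole map
    let slot = map
        .entry(db_id.to_owned())
        .or_insert_with(|| Arc::new(AsyncMutex::new(None)))
        .clone();
    drop(map); // release the map before any further await point
    slot // the caller locks this slot and initializes the Db inside it
}
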
- mutexes: Arc>>>>>, + locks: Arc>>>>>, db_namespace: DbNamespaceId, // Default mm2 d_id derive from passphrase rmd160 db_id: String, @@ -31,10 +31,10 @@ impl ConstructibleDb { let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); let db_id = db_id.unwrap_or(&db_id_); - let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); + let conns = HashMap::from([(db_id.to_owned(), Arc::new(RwLock::new(None)))]); ConstructibleDb { - mutexes: Arc::new(AsyncMutex::new(conns)), + locks: Arc::new(RwLock::new(conns)), db_namespace: ctx.db_namespace, db_id: db_id.to_string(), shared_db_id, @@ -47,9 +47,9 @@ impl ConstructibleDb { pub fn new_shared_db(ctx: &MmArc) -> Self { let db_id = hex::encode(ctx.rmd160().as_slice()); let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); - let conns = HashMap::from([(shared_db_id.clone(), Arc::new(AsyncMutex::new(None)))]); + let conns = HashMap::from([(shared_db_id.clone(), Arc::new(RwLock::new(None)))]); ConstructibleDb { - mutexes: Arc::new(AsyncMutex::new(conns)), + locks: Arc::new(RwLock::new(conns)), db_namespace: ctx.db_namespace, db_id, shared_db_id, @@ -62,7 +62,7 @@ impl ConstructibleDb { let db_id = hex::encode(ctx.rmd160().as_slice()); let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); ConstructibleDb { - mutexes: Arc::new(AsyncMutex::new(HashMap::default())), + locks: Arc::new(RwLock::new(HashMap::default())), db_namespace: ctx.db_namespace, db_id, shared_db_id, @@ -85,9 +85,9 @@ impl ConstructibleDb { let default_id = if is_shared { &self.shared_db_id } else { &self.db_id }; let db_id = db_id.unwrap_or(default_id).to_owned(); - let mut connections = self.mutexes.lock().await; + let mut connections = self.locks.write().await; if let Some(connection) = connections.get_mut(&db_id) { - let mut locked_db = connection.clone().lock_owned().await; + let mut locked_db = connection.clone().write_owned().await; // Drop connections lock as soon as possible. drop(connections); // check and return found connection if already initialized. @@ -103,12 +103,12 @@ impl ConstructibleDb { // No connection found so we create a new connection with immediate initialization let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; - let db = Arc::new(AsyncMutex::new(Some(db))); + let db = Arc::new(RwLock::new(Some(db))); connections.insert(db_id, db.clone()); // Drop connections lock as soon as possible. drop(connections); - let locked_db = db.lock_owned().await; + let locked_db = db.write_owned().await; Ok(unwrap_db_instance(locked_db)) } } @@ -116,8 +116,8 @@ impl ConstructibleDb { /// # Panics /// /// This function will `panic!()` if the inner value of the `guard` is `None`. 
-fn unwrap_db_instance(guard: OwnedMutexGuard>) -> DbLocked { - OwnedMutexGuard::map(guard, |wrapped_db| { +fn unwrap_db_instance(guard: OwnedRwLockWriteGuard>) -> DbLocked { + OwnedRwLockWriteGuard::map(guard, |wrapped_db| { wrapped_db .as_mut() .expect("The locked 'Option' must contain a value") From 115a87352f76ae7249b146140d9615e2fb8f06f1 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 15 May 2024 13:17:00 +0100 Subject: [PATCH 116/186] revert db_lock connection list lock handling to mutex --- mm2src/mm2_db/src/indexed_db/db_lock.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index ffc4fff5a1..ce3f5f64a7 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -2,18 +2,18 @@ use super::{DbIdentifier, DbInstance, InitDbResult}; use mm2_core::{mm_ctx::MmArc, DbNamespaceId}; use std::collections::HashMap; use std::sync::{Arc, Weak}; -use tokio::sync::{OwnedRwLockMappedWriteGuard, OwnedRwLockWriteGuard, RwLock}; +use tokio::sync::{Mutex as AsyncMutex, OwnedMappedMutexGuard, OwnedMutexGuard, RwLock}; /// The mapped mutex guard. /// This implements `Deref`. -pub type DbLocked = OwnedRwLockMappedWriteGuard, Db>; +pub type DbLocked = OwnedMappedMutexGuard, Db>; pub type SharedDb = Arc>; pub type WeakDb = Weak>; #[allow(clippy::type_complexity)] pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the mutable reference. - locks: Arc>>>>>, + locks: Arc>>>>>, db_namespace: DbNamespaceId, // Default mm2 d_id derive from passphrase rmd160 db_id: String, @@ -31,7 +31,7 @@ impl ConstructibleDb { let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); let db_id = db_id.unwrap_or(&db_id_); - let conns = HashMap::from([(db_id.to_owned(), Arc::new(RwLock::new(None)))]); + let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { locks: Arc::new(RwLock::new(conns)), @@ -47,7 +47,7 @@ impl ConstructibleDb { pub fn new_shared_db(ctx: &MmArc) -> Self { let db_id = hex::encode(ctx.rmd160().as_slice()); let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); - let conns = HashMap::from([(shared_db_id.clone(), Arc::new(RwLock::new(None)))]); + let conns = HashMap::from([(shared_db_id.clone(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { locks: Arc::new(RwLock::new(conns)), db_namespace: ctx.db_namespace, @@ -87,7 +87,7 @@ impl ConstructibleDb { let mut connections = self.locks.write().await; if let Some(connection) = connections.get_mut(&db_id) { - let mut locked_db = connection.clone().write_owned().await; + let mut locked_db = connection.clone().lock_owned().await; // Drop connections lock as soon as possible. drop(connections); // check and return found connection if already initialized. @@ -103,12 +103,12 @@ impl ConstructibleDb { // No connection found so we create a new connection with immediate initialization let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; - let db = Arc::new(RwLock::new(Some(db))); + let db = Arc::new(AsyncMutex::new(Some(db))); connections.insert(db_id, db.clone()); // Drop connections lock as soon as possible. 
drop(connections); - let locked_db = db.write_owned().await; + let locked_db = db.lock_owned().await; Ok(unwrap_db_instance(locked_db)) } } @@ -116,8 +116,8 @@ impl ConstructibleDb { /// # Panics /// /// This function will `panic!()` if the inner value of the `guard` is `None`. -fn unwrap_db_instance(guard: OwnedRwLockWriteGuard>) -> DbLocked { - OwnedRwLockWriteGuard::map(guard, |wrapped_db| { +fn unwrap_db_instance(guard: OwnedMutexGuard>) -> DbLocked { + OwnedMutexGuard::map(guard, |wrapped_db| { wrapped_db .as_mut() .expect("The locked 'Option' must contain a value") From 9062beedda253b13559c212f86ee5cc6f98a5b5f Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 16 May 2024 07:08:09 +0100 Subject: [PATCH 117/186] improve my_orders_storage --- mm2src/coins/tendermint/tendermint_coin.rs | 1 + mm2src/mm2_core/src/mm_ctx.rs | 2 +- mm2src/mm2_main/src/database/my_orders.rs | 16 +- mm2src/mm2_main/src/lp_ordermatch.rs | 69 +++--- .../src/lp_ordermatch/my_orders_storage.rs | 209 ++++++++++-------- 5 files changed, 162 insertions(+), 135 deletions(-) diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index f8ca50cb76..8de2f2894b 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2292,6 +2292,7 @@ impl MmCoin for TendermintCoin { let address_hash = dhash160(&public_key.to_bytes()); let address_rmd160_hex = hex::encode(address_hash.as_slice()); + println!("address_rmd160_hex: {address_rmd160_hex:?}"); return Some(address_rmd160_hex); }; diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 205bfa76b4..687634e800 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -324,7 +324,7 @@ impl MmCtx { pub fn is_watcher(&self) -> bool { self.conf["is_watcher"].as_bool().unwrap_or_default() } - pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(true) } + pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(false) } pub fn netid(&self) -> u16 { let netid = self.conf["netid"].as_u64().unwrap_or(0); diff --git a/mm2src/mm2_main/src/database/my_orders.rs b/mm2src/mm2_main/src/database/my_orders.rs index 36ca83f40e..0082174c11 100644 --- a/mm2src/mm2_main/src/database/my_orders.rs +++ b/mm2src/mm2_main/src/database/my_orders.rs @@ -24,8 +24,8 @@ pub const CREATE_MY_ORDERS_TABLE: &str = "CREATE TABLE IF NOT EXISTS my_orders ( rel VARCHAR(255) NOT NULL, price DECIMAL NOT NULL, volume DECIMAL NOT NULL, - created_at INTEGER NOT NULL, - last_updated INTEGER NOT NULL, + created_at INTEGER NOT NULL, + last_updated INTEGER NOT NULL, was_taker INTEGER NOT NULL, status VARCHAR(255) NOT NULL );"; @@ -41,7 +41,7 @@ const UPDATE_ORDER_STATUS: &str = "UPDATE my_orders SET last_updated = ?2, statu const SELECT_STATUS_BY_UUID: &str = "SELECT status FROM my_orders WHERE uuid = ?1"; -pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Option<&str>) -> SqlResult<()> { +pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlResult<()> { debug!("Inserting new order {} to the SQLite database", uuid); let params = vec![ uuid.to_string(), @@ -56,13 +56,13 @@ pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Op 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection(db_id); + let conn = ctx.sqlite_connection(order.db_id.as_deref()); let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_ORDER, 
params_from_iter(params.iter())) .map(|_| ()) } -pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder, db_id: Option<&str>) -> SqlResult<()> { +pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder) -> SqlResult<()> { debug!("Inserting new order {} to the SQLite database", uuid); let price = order.request.rel_amount.to_decimal() / order.request.base_amount.to_decimal(); let initial_action = match order.request.action { @@ -82,13 +82,13 @@ pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder, db_id: Op 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection(db_id); + let conn = ctx.sqlite_connection(order.db_id.as_deref()); let conn = conn.lock().unwrap(); conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) } -pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Option<&str>) -> SqlResult<()> { +pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlResult<()> { debug!("Updating order {} in the SQLite database", uuid); let params = vec![ uuid.to_string(), @@ -97,7 +97,7 @@ pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder, db_id: Op order.updated_at.unwrap_or(0).to_string(), "Updated".to_string(), ]; - let conn = ctx.sqlite_connection(db_id); + let conn = ctx.sqlite_connection(order.db_id.as_deref()); let conn = conn.lock().unwrap(); conn.execute(UPDATE_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 0b4681315e..b3470fc7b3 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -1580,7 +1580,7 @@ pub struct TakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, - db_id: Option, + pub db_id: Option, } /// Result of match_reserved function @@ -1714,7 +1714,7 @@ pub struct MakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, - db_id: Option, + pub db_id: Option, } pub struct MakerOrderBuilder<'a> { @@ -3380,8 +3380,7 @@ pub async fn clean_memory_loop(ctx_weak: MmWeak) { /// The function locks the [`OrdermatchContext::my_maker_orders`] and [`OrdermatchContext::my_taker_orders`] mutexes. async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let mut my_taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - // TODO: db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone()); let mut my_actual_taker_orders = HashMap::with_capacity(my_taker_orders.len()); for (uuid, order) in my_taker_orders.drain() { @@ -3416,7 +3415,7 @@ async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchCo .error_log_with_msg("!save_new_active_maker_order"); if maker_order.save_in_history { storage - .update_was_taker_in_filtering_history(uuid) + .update_was_taker_in_filtering_history(uuid, maker_order.db_id().as_deref()) .await .error_log_with_msg("!update_was_taker_in_filtering_history"); } @@ -3471,8 +3470,7 @@ async fn check_balance_for_maker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchC /// The function locks the [`OrdermatchContext::my_maker_orders`] mutex. 
async fn handle_timed_out_maker_matches(ctx: MmArc, ordermatch_ctx: &OrdermatchContext) { let now = now_ms(); - // TODO: db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone()); let my_maker_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); for (_, order) in my_maker_orders.iter() { @@ -3586,7 +3584,7 @@ async fn process_maker_reserved(ctx: MmArc, from_pubkey: H256Json, reserved_msg: my_order .matches .insert(taker_match.reserved.maker_order_uuid, taker_match); - MyOrdersStorage::new(ctx, base_coin.account_db_id()) + MyOrdersStorage::new(ctx) .update_active_taker_order(my_order) .await .error_log_with_msg("!update_active_taker_order"); @@ -3663,8 +3661,7 @@ async fn process_taker_request(ctx: MmArc, from_pubkey: H256Json, taker_request: } let ordermatch_ctx = OrdermatchContext::from_ctx(&ctx).unwrap(); - // TODO: db_id - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone()); let mut my_orders = ordermatch_ctx.maker_orders_ctx.lock().orders.clone(); let filtered = my_orders .iter_mut() @@ -3813,8 +3810,7 @@ async fn process_taker_connect(ctx: MmArc, sender_pubkey: PublicKey, connect_msg updated_msg.with_new_max_volume(my_order.available_amount().into()); maker_order_updated_p2p_notify(ctx.clone(), topic, updated_msg, my_order.p2p_keypair()); } - // TODO: db_id - MyOrdersStorage::new(ctx, None) + MyOrdersStorage::new(ctx) .update_active_maker_order(&my_order) .await .error_log_with_msg("!update_active_maker_order"); @@ -4965,10 +4961,10 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St .map_err(|e| ERRL!("{}", e)); } - let storage = MyOrdersStorage::new(ctx.clone(), db_id.clone()); + let storage = MyOrdersStorage::new(ctx.clone()); if let (Ok(order), Ok(cancellation_reason)) = ( - storage.load_order_from_history(req.uuid).await, - &storage.select_order_status(req.uuid).await, + storage.load_order_from_history(req.uuid, db_id.as_deref()).await, + &storage.select_order_status(req.uuid, db_id.as_deref()).await, ) { info!("Order with UUID=({})", req.uuid); let res = json!(OrderForRpcWithCancellationReason { @@ -5040,6 +5036,13 @@ impl Order { Order::Taker(taker) => taker.request.uuid, } } + + pub fn db_id(&self) -> Option { + match self { + Order::Maker(maker) => maker.db_id(), + Order::Taker(taker) => taker.db_id(), + } + } } #[derive(Serialize)] @@ -5079,8 +5082,8 @@ pub async fn orders_history_by_filter(ctx: MmArc, req: Json) -> Result Result Result>, String> { } #[cfg(not(target_arch = "wasm32"))] -pub fn my_maker_orders_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir(None).join("ORDERS").join("MY").join("MAKER") } +pub fn my_maker_orders_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id).join("ORDERS").join("MY").join("MAKER") +} #[cfg(not(target_arch = "wasm32"))] -fn my_taker_orders_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir(None).join("ORDERS").join("MY").join("TAKER") } +fn my_taker_orders_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id).join("ORDERS").join("MY").join("TAKER") +} #[cfg(not(target_arch = "wasm32"))] -fn my_orders_history_dir(ctx: &MmArc) -> PathBuf { ctx.dbdir(None).join("ORDERS").join("MY").join("HISTORY") } +fn my_orders_history_dir(ctx: &MmArc, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id).join("ORDERS").join("MY").join("HISTORY") +} #[cfg(not(target_arch = "wasm32"))] -pub fn my_maker_order_file_path(ctx: &MmArc, uuid: &Uuid) -> PathBuf { - my_maker_orders_dir(ctx).join(format!("{}.json", uuid)) 
+pub fn my_maker_order_file_path(ctx: &MmArc, uuid: &Uuid, db_id: Option<&str>) -> PathBuf { + my_maker_orders_dir(ctx, db_id).join(format!("{}.json", uuid)) } #[cfg(not(target_arch = "wasm32"))] -fn my_taker_order_file_path(ctx: &MmArc, uuid: &Uuid) -> PathBuf { - my_taker_orders_dir(ctx).join(format!("{}.json", uuid)) +fn my_taker_order_file_path(ctx: &MmArc, uuid: &Uuid, db_id: Option<&str>) -> PathBuf { + my_taker_orders_dir(ctx, db_id).join(format!("{}.json", uuid)) } #[cfg(not(target_arch = "wasm32"))] -fn my_order_history_file_path(ctx: &MmArc, uuid: &Uuid) -> PathBuf { - my_orders_history_dir(ctx).join(format!("{}.json", uuid)) +fn my_order_history_file_path(ctx: &MmArc, uuid: &Uuid, db_id: Option<&str>) -> PathBuf { + my_orders_history_dir(ctx, db_id).join(format!("{}.json", uuid)) } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -5410,9 +5419,9 @@ pub struct HistoricalOrder { pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(ctx)); let db_id: Option = None; // TODO - let storage = MyOrdersStorage::new(ctx.clone(), db_id); - let saved_maker_orders = try_s!(storage.load_active_maker_orders().await); - let saved_taker_orders = try_s!(storage.load_active_taker_orders().await); + let storage = MyOrdersStorage::new(ctx.clone()); + let saved_maker_orders = try_s!(storage.load_active_maker_orders(db_id.as_deref()).await); + let saved_taker_orders = try_s!(storage.load_active_taker_orders(db_id.as_deref()).await); let mut coins = HashSet::with_capacity((saved_maker_orders.len() * 2) + (saved_taker_orders.len() * 2)); { diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 6fbacbe123..f4ae0fc6b5 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -36,7 +36,7 @@ pub enum MyOrdersError { } pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - let storage = MyOrdersStorage::new(ctx, order.db_id()); + let storage = MyOrdersStorage::new(ctx); storage .save_new_active_maker_order(order) .await @@ -49,7 +49,7 @@ pub async fn save_my_new_maker_order(ctx: MmArc, order: &MakerOrder) -> MyOrders } pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrdersResult<()> { - let storage = MyOrdersStorage::new(ctx, order.db_id()); + let storage = MyOrdersStorage::new(ctx); storage .save_new_active_taker_order(order) .await @@ -62,8 +62,7 @@ pub async fn save_my_new_taker_order(ctx: MmArc, order: &TakerOrder) -> MyOrders } pub async fn save_maker_order_on_update(ctx: MmArc, order: &MakerOrder) -> MyOrdersResult<()> { - let db_id = order.db_id(); - let storage = MyOrdersStorage::new(ctx, db_id); + let storage = MyOrdersStorage::new(ctx); storage.update_active_maker_order(order).await?; if order.save_in_history { @@ -77,10 +76,11 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa let fut = async move { let uuid = order.request.uuid; let save_in_history = order.save_in_history; + let db_id = order.db_id(); - let storage = MyOrdersStorage::new(ctx, order.db_id()); + let storage = MyOrdersStorage::new(ctx); storage - .delete_active_taker_order(uuid) + .delete_active_taker_order(uuid, db_id.as_deref()) .await .error_log_with_msg("!delete_active_taker_order"); @@ -98,7 +98,7 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa if save_in_history { 
storage - .update_order_status_in_filtering_history(uuid, reason.to_string()) + .update_order_status_in_filtering_history(uuid, reason.to_string(), db_id.as_deref()) .await .error_log_with_msg("!update_order_status_in_filtering_history"); } @@ -114,14 +114,17 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa let uuid = order_to_save.uuid; let save_in_history = order_to_save.save_in_history; - let storage = MyOrdersStorage::new(ctx, order_to_save.db_id()); + let storage = MyOrdersStorage::new(ctx); if order_to_save.was_updated() { - if let Ok(order_from_file) = storage.load_active_maker_order(order_to_save.uuid).await { + if let Ok(order_from_file) = storage + .load_active_maker_order(order_to_save.uuid, order_to_save.db_id().as_deref()) + .await + { order_to_save = order_from_file; } } storage - .delete_active_maker_order(uuid) + .delete_active_maker_order(uuid, order_to_save.db_id().as_deref()) .await .error_log_with_msg("!delete_active_maker_order"); @@ -131,7 +134,7 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa .await .error_log_with_msg("!save_order_in_history"); storage - .update_order_status_in_filtering_history(uuid, reason.to_string()) + .update_order_status_in_filtering_history(uuid, reason.to_string(), order_to_save.db_id().as_deref()) .await .error_log_with_msg("!update_order_status_in_filtering_history"); } @@ -142,11 +145,11 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa #[async_trait] pub trait MyActiveOrders { - async fn load_active_maker_orders(&self) -> MyOrdersResult>; + async fn load_active_maker_orders(&self, db_id: Option<&str>) -> MyOrdersResult>; - async fn load_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult; + async fn load_active_maker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult; - async fn load_active_taker_orders(&self) -> MyOrdersResult>; + async fn load_active_taker_orders(&self, db_id: Option<&str>) -> MyOrdersResult>; async fn save_new_active_order(&self, order: &Order) -> MyOrdersResult<()> { match order { @@ -159,9 +162,9 @@ pub trait MyActiveOrders { async fn save_new_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()>; - async fn delete_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult<()>; + async fn delete_active_maker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()>; - async fn delete_active_taker_order(&self, uuid: Uuid) -> MyOrdersResult<()>; + async fn delete_active_taker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()>; async fn update_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()>; @@ -172,7 +175,7 @@ pub trait MyActiveOrders { pub trait MyOrdersHistory { async fn save_order_in_history(&self, order: &Order) -> MyOrdersResult<()>; - async fn load_order_from_history(&self, uuid: Uuid) -> MyOrdersResult; + async fn load_order_from_history(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult; } #[async_trait] @@ -181,9 +184,10 @@ pub trait MyOrdersFilteringHistory { &self, filter: &MyOrdersFilter, paging_options: Option<&PagingOptions>, + db_id: Option<&str>, ) -> MyOrdersResult; - async fn select_order_status(&self, uuid: Uuid) -> MyOrdersResult; + async fn select_order_status(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult; async fn save_order_in_filtering_history(&self, order: &Order) -> MyOrdersResult<()> { match order { @@ -198,9 +202,14 @@ pub trait MyOrdersFilteringHistory { async fn 
update_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()>; - async fn update_order_status_in_filtering_history(&self, uuid: Uuid, status: String) -> MyOrdersResult<()>; + async fn update_order_status_in_filtering_history( + &self, + uuid: Uuid, + status: String, + db_id: Option<&str>, + ) -> MyOrdersResult<()>; - async fn update_was_taker_in_filtering_history(&self, uuid: Uuid) -> MyOrdersResult<()>; + async fn update_was_taker_in_filtering_history(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()>; } #[cfg(not(target_arch = "wasm32"))] @@ -231,54 +240,53 @@ mod native_impl { #[derive(Clone)] pub struct MyOrdersStorage { ctx: MmArc, - pub db_id: Option, } impl MyOrdersStorage { - pub fn new(ctx: MmArc, db_id: Option) -> MyOrdersStorage { MyOrdersStorage { ctx, db_id } } + pub fn new(ctx: MmArc) -> MyOrdersStorage { MyOrdersStorage { ctx } } } #[async_trait] impl MyActiveOrders for MyOrdersStorage { - async fn load_active_maker_orders(&self) -> MyOrdersResult> { - let dir_path = my_maker_orders_dir(&self.ctx); + async fn load_active_maker_orders(&self, db_id: Option<&str>) -> MyOrdersResult> { + let dir_path = my_maker_orders_dir(&self.ctx, db_id); Ok(read_dir_json(&dir_path).await?) } - async fn load_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult { - let path = my_maker_order_file_path(&self.ctx, &uuid); + async fn load_active_maker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult { + let path = my_maker_order_file_path(&self.ctx, &uuid, db_id); read_json(&path) .await? .or_mm_err(|| MyOrdersError::NoSuchOrder { uuid }) } - async fn load_active_taker_orders(&self) -> MyOrdersResult> { - let dir_path = my_taker_orders_dir(&self.ctx); + async fn load_active_taker_orders(&self, db_id: Option<&str>) -> MyOrdersResult> { + let dir_path = my_taker_orders_dir(&self.ctx, db_id); Ok(read_dir_json(&dir_path).await?) 
} async fn save_new_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let path = my_maker_order_file_path(&self.ctx, &order.uuid); + let path = my_maker_order_file_path(&self.ctx, &order.uuid, order.db_id().as_deref()); write_json(order, &path, USE_TMP_FILE).await?; Ok(()) } async fn save_new_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let path = my_taker_order_file_path(&self.ctx, &order.request.uuid); + let path = my_taker_order_file_path(&self.ctx, &order.request.uuid, order.db_id().as_deref()); write_json(order, &path, USE_TMP_FILE).await?; Ok(()) } - async fn delete_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult<()> { - let path = my_maker_order_file_path(&self.ctx, &uuid); + async fn delete_active_maker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()> { + let path = my_maker_order_file_path(&self.ctx, &uuid, db_id); remove_file_async(&path) .await .mm_err(|e| MyOrdersError::ErrorSaving(e.to_string()))?; Ok(()) } - async fn delete_active_taker_order(&self, uuid: Uuid) -> MyOrdersResult<()> { - let path = my_taker_order_file_path(&self.ctx, &uuid); + async fn delete_active_taker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()> { + let path = my_taker_order_file_path(&self.ctx, &uuid, db_id); remove_file_async(&path) .await .mm_err(|e| MyOrdersError::ErrorSaving(e.to_string()))?; @@ -297,13 +305,13 @@ mod native_impl { #[async_trait] impl MyOrdersHistory for MyOrdersStorage { async fn save_order_in_history(&self, order: &Order) -> MyOrdersResult<()> { - let path = my_order_history_file_path(&self.ctx, &order.uuid()); + let path = my_order_history_file_path(&self.ctx, &order.uuid(), order.db_id().as_deref()); write_json(order, &path, USE_TMP_FILE).await?; Ok(()) } - async fn load_order_from_history(&self, uuid: Uuid) -> MyOrdersResult { - let path = my_order_history_file_path(&self.ctx, &uuid); + async fn load_order_from_history(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult { + let path = my_order_history_file_path(&self.ctx, &uuid, db_id); read_json(&path) .await? 
.or_mm_err(|| MyOrdersError::NoSuchOrder { uuid }) @@ -316,42 +324,44 @@ mod native_impl { &self, filter: &MyOrdersFilter, paging_options: Option<&PagingOptions>, + db_id: Option<&str>, ) -> MyOrdersResult { - let conn = self.ctx.sqlite_connection(self.db_id.as_deref()); + let conn = self.ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); select_orders_by_filter(&conn, filter, paging_options) .map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) } - async fn select_order_status(&self, uuid: Uuid) -> MyOrdersResult { - let conn = self.ctx.sqlite_connection(self.db_id.as_deref()); + async fn select_order_status(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult { + let conn = self.ctx.sqlite_connection(db_id); let conn = conn.lock().unwrap(); select_status_by_uuid(&conn, &uuid).map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) } async fn save_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - insert_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()) - .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + insert_maker_order(&self.ctx, order.uuid, order).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn save_taker_order_in_filtering_history(&self, order: &TakerOrder) -> MyOrdersResult<()> { - insert_taker_order(&self.ctx, order.request.uuid, order, self.db_id.as_deref()) + insert_taker_order(&self.ctx, order.request.uuid, order) .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } async fn update_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - update_maker_order(&self.ctx, order.uuid, order, self.db_id.as_deref()) - .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + update_maker_order(&self.ctx, order.uuid, order).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } - async fn update_order_status_in_filtering_history(&self, uuid: Uuid, status: String) -> MyOrdersResult<()> { - update_order_status(&self.ctx, uuid, status, self.db_id.as_deref()) - .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + async fn update_order_status_in_filtering_history( + &self, + uuid: Uuid, + status: String, + db_id: Option<&str>, + ) -> MyOrdersResult<()> { + update_order_status(&self.ctx, uuid, status, db_id).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } - async fn update_was_taker_in_filtering_history(&self, uuid: Uuid) -> MyOrdersResult<()> { - update_was_taker(&self.ctx, uuid, self.db_id.as_deref()) - .map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) + async fn update_was_taker_in_filtering_history(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()> { + update_was_taker(&self.ctx, uuid, db_id).map_to_mm(|e| MyOrdersError::ErrorSaving(e.to_string())) } } } @@ -399,22 +409,20 @@ mod wasm_impl { #[derive(Clone)] pub struct MyOrdersStorage { ctx: Arc, - db_id: Option, } impl MyOrdersStorage { - pub fn new(ctx: MmArc, db_id: Option) -> MyOrdersStorage { + pub fn new(ctx: MmArc) -> MyOrdersStorage { MyOrdersStorage { ctx: OrdermatchContext::from_ctx(&ctx).expect("!OrdermatchContext::from_ctx"), - db_id, } } } #[async_trait] impl MyActiveOrders for MyOrdersStorage { - async fn load_active_maker_orders(&self) -> MyOrdersResult> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn load_active_maker_orders(&self, db_id: Option<&str>) -> MyOrdersResult> { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let 
maker_orders = table.get_all_items().await?; @@ -424,8 +432,8 @@ mod wasm_impl { .collect()) } - async fn load_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn load_active_maker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -436,8 +444,8 @@ mod wasm_impl { .or_mm_err(|| MyOrdersError::NoSuchOrder { uuid }) } - async fn load_active_taker_orders(&self) -> MyOrdersResult> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn load_active_taker_orders(&self, db_id: Option<&str>) -> MyOrdersResult> { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; let maker_orders = table.get_all_items().await?; @@ -448,7 +456,7 @@ mod wasm_impl { } async fn save_new_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -461,7 +469,7 @@ mod wasm_impl { } async fn save_new_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -473,16 +481,16 @@ mod wasm_impl { Ok(()) } - async fn delete_active_maker_order(&self, uuid: Uuid) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn delete_active_maker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()> { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.delete_item_by_unique_index("uuid", uuid).await?; Ok(()) } - async fn delete_active_taker_order(&self, uuid: Uuid) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn delete_active_taker_order(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()> { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.delete_item_by_unique_index("uuid", uuid).await?; @@ -490,7 +498,7 @@ mod wasm_impl { } async fn update_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -503,7 +511,7 @@ mod wasm_impl { } async fn update_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -521,7 +529,7 @@ mod wasm_impl { #[async_trait] impl MyOrdersHistory for MyOrdersStorage { async fn save_order_in_history(&self, order: &Order) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id().as_deref()).await?; let 
transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -533,8 +541,8 @@ mod wasm_impl { Ok(()) } - async fn load_order_from_history(&self, uuid: Uuid) -> MyOrdersResult { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn load_order_from_history(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -552,6 +560,7 @@ mod wasm_impl { &self, _filter: &MyOrdersFilter, _paging_options: Option<&PagingOptions>, + _db_id: Option<&str>, ) -> MyOrdersResult { warn!("'select_orders_by_filter' not supported in WASM yet"); MmError::err(MyOrdersError::InternalError( @@ -559,8 +568,8 @@ mod wasm_impl { )) } - async fn select_order_status(&self, uuid: Uuid) -> MyOrdersResult { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn select_order_status(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -574,7 +583,7 @@ mod wasm_impl { async fn save_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { let item = maker_order_to_filtering_history_item(order, "Created".to_owned(), false)?; - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.add_item(&item).await?; @@ -584,7 +593,7 @@ mod wasm_impl { async fn save_taker_order_in_filtering_history(&self, order: &TakerOrder) -> MyOrdersResult<()> { let item = taker_order_to_filtering_history_item(order, "Created".to_owned())?; - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.add_item(&item).await?; @@ -592,7 +601,7 @@ mod wasm_impl { } async fn update_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; // get the previous item to see if the order was taker @@ -606,8 +615,13 @@ mod wasm_impl { Ok(()) } - async fn update_order_status_in_filtering_history(&self, uuid: Uuid, status: String) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn update_order_status_in_filtering_history( + &self, + uuid: Uuid, + status: String, + db_id: Option<&str>, + ) -> MyOrdersResult<()> { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -621,8 +635,8 @@ mod wasm_impl { Ok(()) } - async fn update_was_taker_in_filtering_history(&self, uuid: Uuid) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(self.db_id.as_deref()).await?; + async fn update_was_taker_in_filtering_history(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult<()> { + let db = self.ctx.ordermatch_db(db_id).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -786,7 +800,7 @@ mod tests { #[wasm_bindgen_test] async fn test_delete_my_maker_order() { let ctx = 
MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone()); let maker1 = maker_order_for_test(); @@ -804,12 +818,12 @@ mod tests { .unwrap(); let actual_active_maker_orders = storage - .load_active_taker_orders() + .load_active_taker_orders(None) .await .expect("!MyOrdersStorage::load_active_taker_orders"); assert!(actual_active_maker_orders.is_empty()); let actual_history_order = storage - .load_order_from_history(maker1.uuid) + .load_order_from_history(maker1.uuid, None) .await .expect("!MyOrdersStorage::load_order_from_history"); assert_eq!(actual_history_order, Order::Maker(maker1.clone())); @@ -822,7 +836,7 @@ mod tests { #[wasm_bindgen_test] async fn test_delete_my_taker_order() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone()); let taker1 = taker_order_for_test(); let taker2 = TakerOrder { @@ -841,12 +855,12 @@ mod tests { .unwrap(); let actual_active_taker_orders = storage - .load_active_taker_orders() + .load_active_taker_orders(None) .await .expect("!MyOrdersStorage::load_active_taker_orders"); assert!(actual_active_taker_orders.is_empty()); let actual_history_order = storage - .load_order_from_history(taker1.request.uuid) + .load_order_from_history(taker1.request.uuid, None) .await .expect("!MyOrdersStorage::load_order_from_history"); assert_eq!(actual_history_order, Order::Taker(taker1.clone())); @@ -865,11 +879,14 @@ mod tests { .unwrap(); let actual_active_taker_orders = storage - .load_active_taker_orders() + .load_active_taker_orders(None) .await .expect("!MyOrdersStorage::load_active_taker_orders"); assert!(actual_active_taker_orders.is_empty()); - let error = storage.load_order_from_history(taker2.request.uuid).await.expect_err( + let error = storage + .load_order_from_history(taker2.request.uuid, None) + .await + .expect_err( "!MyOrdersStorage::load_order_from_history should have failed with the 'MyOrdersError::NoSuchOrder' error", ); assert_eq!(error.into_inner(), MyOrdersError::NoSuchOrder { @@ -880,7 +897,7 @@ mod tests { #[wasm_bindgen_test] async fn test_load_active_maker_taker_orders() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone()); let maker1 = maker_order_for_test(); let mut maker2 = MakerOrder { @@ -904,7 +921,7 @@ mod tests { .expect("!MyOrdersStorage::update_active_maker_order"); let actual_maker_orders: Vec<_> = storage - .load_active_maker_orders() + .load_active_maker_orders(None) .await .expect("!MyOrdersStorage::load_active_maker_orders") .into_iter() @@ -917,7 +934,7 @@ mod tests { assert_eq!(actual_maker_orders, expected_maker_orders); let actual_taker_orders: Vec<_> = storage - .load_active_taker_orders() + .load_active_taker_orders(None) .await .expect("!MyOrdersStorage::load_active_taker_orders"); let expected_taker_orders = vec![taker1]; @@ -927,7 +944,7 @@ mod tests { #[wasm_bindgen_test] async fn test_filtering_history() { let ctx = MmCtxBuilder::new().with_test_db_namespace().into_mm_arc(); - let storage = MyOrdersStorage::new(ctx.clone(), None); + let storage = MyOrdersStorage::new(ctx.clone()); let maker1 = maker_order_for_test(); let mut maker2 = MakerOrder { @@ -951,7 +968,7 @@ mod tests { .expect("!MyOrdersStorage::save_taker_order_in_filtering_history"); storage - 
.update_order_status_in_filtering_history(taker1.request.uuid, "MyCustomStatus".to_owned()) + .update_order_status_in_filtering_history(taker1.request.uuid, "MyCustomStatus".to_owned(), None) .await .expect("!MyOrdersStorage::update_order_status_in_filtering_history"); @@ -962,7 +979,7 @@ mod tests { .expect("MyOrdersStorage::update_maker_order_in_filtering_history"); storage - .update_was_taker_in_filtering_history(maker1.uuid) + .update_was_taker_in_filtering_history(maker1.uuid, None) .await .expect("MyOrdersStorage::update_was_taker_in_filtering_history"); @@ -984,14 +1001,14 @@ mod tests { assert_eq!(actual_items, expected_items); let taker1_status = storage - .select_order_status(taker1.request.uuid) + .select_order_status(taker1.request.uuid, None) .await .expect("!MyOrdersStorage::select_order_status"); assert_eq!(taker1_status, "MyCustomStatus"); let unknown_uuid = new_uuid(); let err = storage - .select_order_status(unknown_uuid) + .select_order_status(unknown_uuid, None) .await .expect_err("!MyOrdersStorage::select_order_status should have failed"); assert_eq!(err.into_inner(), MyOrdersError::NoSuchOrder { uuid: unknown_uuid }); From b370bec74069a71636c8695fc6c284f82fa3dce9 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 17 May 2024 00:38:15 +0100 Subject: [PATCH 118/186] implement DbMigrationWatcher --- Cargo.toml | 2 +- mm2src/coins/tendermint/tendermint_coin.rs | 1 - .../src/tendermint_with_assets_activation.rs | 17 ++++++ mm2src/crypto/src/lib.rs | 2 +- mm2src/mm2_core/src/mm_ctx.rs | 19 +++++- mm2src/mm2_core/src/sql_connection_pool.rs | 59 ++++++++++++++++++- mm2src/mm2_main/Cargo.toml | 27 ++++++--- mm2src/mm2_main/src/database.rs | 20 +++++-- mm2src/mm2_main/src/lp_native_dex.rs | 51 +++++++++++++--- 9 files changed, 173 insertions(+), 25 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b87feeb988..34dd176a09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,4 +70,4 @@ debug = 1 debug-assertions = false panic = 'unwind' incremental = true -codegen-units = 256 \ No newline at end of file +codegen-units = 256 diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 8de2f2894b..f8ca50cb76 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2292,7 +2292,6 @@ impl MmCoin for TendermintCoin { let address_hash = dhash160(&public_key.to_bytes()); let address_rmd160_hex = hex::encode(address_hash.as_slice()); - println!("address_rmd160_hex: {address_rmd160_hex:?}"); return Some(address_rmd160_hex); }; diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index d413175364..2b1d0a09af 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -15,9 +15,12 @@ use coins::tendermint::{tendermint_priv_key_policy, TendermintActivationPolicy, TendermintConf, TendermintInitError, TendermintInitErrorKind, TendermintProtocolInfo, TendermintPublicKey, TendermintToken, TendermintTokenActivationParams, TendermintTokenInitError, TendermintTokenProtocolInfo}; +#[cfg(not(target_arch = "wasm32"))] use coins::utxo::dhash160; use coins::{CoinBalance, CoinProtocol, MarketCoinOps, MmCoin, MmCoinEnum, PrivKeyBuildPolicy}; use common::executor::{AbortSettings, SpawnAbortable}; +#[cfg(not(target_arch = "wasm32"))] use common::log::debug; use common::{true_f, Future01CompatExt}; +#[cfg(not(target_arch = 
"wasm32"))] use futures::SinkExt; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; @@ -243,6 +246,20 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { }); } + #[cfg(not(target_arch = "wasm32"))] + { + // Send migration request + let address_hash = dhash160(&pubkey.to_bytes()); + let address_rmd160_hex = hex::encode(address_hash.as_slice()); + let db_migration_sender = ctx.db_migration_watcher().await.get_sender().await; + let mut db_migration_sender = db_migration_sender.lock().await; + if db_migration_sender.send(address_rmd160_hex.clone()).await.is_ok() { + debug!("Sending migration request for db_id: {address_rmd160_hex:?}"); + }; + + // TODO: handle for shared_db_id. + } + TendermintActivationPolicy::with_public_key(pubkey) } else { let private_key_policy = diff --git a/mm2src/crypto/src/lib.rs b/mm2src/crypto/src/lib.rs index e2651a54ea..0968ea36a3 100644 --- a/mm2src/crypto/src/lib.rs +++ b/mm2src/crypto/src/lib.rs @@ -12,7 +12,7 @@ pub mod hw_rpc_task; mod key_derivation; pub mod mnemonic; pub mod privkey; -mod shared_db_id; +pub mod shared_db_id; mod slip21; mod standard_hd_path; mod xpub; diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 687634e800..43b5b4fa61 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -29,7 +29,7 @@ cfg_wasm32! { cfg_native! { use db_common::sqlite::rusqlite::Connection; - use crate::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool}; + use crate::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool, DbMigrationWatcher}; use rustls::ServerName; use mm2_metrics::prometheus; use mm2_metrics::MmMetricsError; @@ -139,6 +139,8 @@ pub struct MmCtx { pub db_namespace: DbNamespaceId, /// The context belonging to the `nft` mod: `NftCtx`. 
pub nft_ctx: Mutex>>, + #[cfg(not(target_arch = "wasm32"))] + pub db_migration_watcher: Constructible>, } impl MmCtx { @@ -187,6 +189,8 @@ impl MmCtx { #[cfg(target_arch = "wasm32")] db_namespace: DbNamespaceId::Main, nft_ctx: Mutex::new(None), + #[cfg(not(target_arch = "wasm32"))] + db_migration_watcher: Constructible::default(), } } @@ -382,6 +386,19 @@ impl MmCtx { .clone(); pool.sqlite_conn(self, db_id) } + + #[cfg(not(target_arch = "wasm32"))] + pub async fn init_db_migration_watcher(&self) -> Result, String> { + DbMigrationWatcher::init(self).await + } + + #[cfg(not(target_arch = "wasm32"))] + pub async fn db_migration_watcher(&self) -> Arc { + self.db_migration_watcher + .as_option() + .expect("Db migration watcher isn't intialized yet!") + .clone() + } } impl Default for MmCtx { diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 71ef9331a7..090fe43fb9 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -2,9 +2,10 @@ use crate::mm_ctx::{log_sqlite_file_open_attempt, MmCtx}; use common::log::error; use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; +use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::lock::Mutex as AsyncMutex; use gstuff::try_s; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::{Arc, Mutex}; pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; @@ -258,3 +259,59 @@ impl AsyncSqliteConnPool { )) } } + +pub type DbMigrationHandler = Arc>>; +pub type DbMigrationSender = Arc>>; + +pub fn create_db_migration_watcher() -> (Sender, Receiver) { + let (sender, receiver) = channel(1); + (sender, receiver) +} + +pub struct DbMigrationWatcher { + migrations: Arc>>, + sender: DbMigrationSender, + receiver: DbMigrationHandler, +} + +impl DbMigrationWatcher { + pub async fn init(ctx: &MmCtx) -> Result, String> { + let (sender, receiver) = create_db_migration_watcher(); + + let selfi = Arc::new(Self { + migrations: Default::default(), + sender: Arc::new(AsyncMutex::new(sender)), + receiver: Arc::new(AsyncMutex::new(receiver)), + }); + try_s!(ctx.db_migration_watcher.pin(selfi.clone())); + + Ok(selfi) + } + + pub async fn is_db_migrated(&self, db_id: Option<&str>) -> bool { + if let Some(db_id) = db_id { + let guard = self.migrations.lock().await; + if guard.get(db_id).is_some() { + // migration hasn'been been ran for db with this id + return true; + }; + + // migration hasn't been ran for db with this id + return false; + } + + // migration hasn been when no db id is provided we assume it's the default db id + true + } + + pub async fn db_id_migrated(&self, db_id: Option<&str>) { + if let Some(db_id) = db_id { + let mut guard = self.migrations.lock().await; + guard.insert(db_id.to_owned()); + } + } + + pub async fn get_receiver(&self) -> DbMigrationHandler { self.receiver.clone() } + + pub async fn get_sender(&self) -> DbMigrationSender { self.sender.clone() } +} diff --git a/mm2src/mm2_main/Cargo.toml b/mm2src/mm2_main/Cargo.toml index 1803fa7021..d9cfdba348 100644 --- a/mm2src/mm2_main/Cargo.toml +++ b/mm2src/mm2_main/Cargo.toml @@ -12,7 +12,8 @@ edition = "2018" doctest = false [features] -custom-swap-locktime = [] # only for testing purposes, should never be activated on release builds. +custom-swap-locktime = [ +] # only for testing purposes, should never be activated on release builds. 
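[Illustration, not part of the diff] The `DbMigrationWatcher` added above is a capacity-1 mpsc channel guarded by async mutexes plus a set of already-migrated db ids: coin activation pushes an account id through the sender, and one long-lived task drains the receiver and migrates each database at most once. A condensed sketch assuming the types and methods introduced in this patch, with `futures::{SinkExt, StreamExt}` in scope, cfg-gating and error handling elided, and the plain `String` payload used at this point in the series:

    // Producer side: announce a newly seen account database id.
    async fn request_db_migration(ctx: &MmArc, db_id: String) {
        let sender = ctx.db_migration_watcher().await.get_sender().await;
        let mut sender = sender.lock().await;
        let _ = sender.send(db_id).await;
    }

    // Consumer side: run migrations once per announced id.
    async fn db_migration_loop(ctx: MmArc) {
        let watcher = ctx
            .init_db_migration_watcher()
            .await
            .expect("!init_db_migration_watcher");
        let receiver = watcher.get_receiver().await;
        let mut receiver = receiver.lock().await;
        while let Some(db_id) = receiver.next().await {
            if watcher.is_db_migrated(Some(&db_id)).await {
                continue;
            }
            // ... run the actual SQLite/file-system migration for `db_id` here ...
            watcher.db_id_migrated(Some(&db_id)).await;
        }
    }
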
native = [] # Deprecated track-ctx-pointer = ["common/track-ctx-pointer"] zhtlc-native-tests = ["coins/zhtlc-native-tests"] @@ -20,7 +21,9 @@ run-docker-tests = ["coins/run-docker-tests"] # TODO enable-solana = [] default = [] -trezor-udp = ["crypto/trezor-udp"] # use for tests to connect to trezor emulator over udp +trezor-udp = [ + "crypto/trezor-udp", +] # use for tests to connect to trezor emulator over udp run-device-tests = [] enable-sia = [] @@ -41,11 +44,17 @@ crypto = { path = "../crypto" } db_common = { path = "../db_common" } derive_more = "0.99" either = "1.6" -ethereum-types = { version = "0.13", default-features = false, features = ["std", "serialize"] } +ethereum-types = { version = "0.13", default-features = false, features = [ + "std", + "serialize", +] } enum_derives = { path = "../derives/enum_derives" } enum-primitive-derive = "0.2" futures01 = { version = "0.1", package = "futures" } -futures = { version = "0.3.1", package = "futures", features = ["compat", "async-await"] } +futures = { version = "0.3.1", package = "futures", features = [ + "compat", + "async-await", +] } gstuff = { version = "0.7", features = ["nightly"] } hash256-std-hasher = "0.15.2" hash-db = "0.15.2" @@ -67,7 +76,7 @@ mm2-libp2p = { path = "../mm2_p2p", package = "mm2_p2p" } mm2_metrics = { path = "../mm2_metrics" } mm2_net = { path = "../mm2_net", features = ["event-stream", "p2p"] } mm2_number = { path = "../mm2_number" } -mm2_rpc = { path = "../mm2_rpc", features = ["rpc_facilities"]} +mm2_rpc = { path = "../mm2_rpc", features = ["rpc_facilities"] } mm2_state_machine = { path = "../mm2_state_machine" } num-traits = "0.2" parity-util-mem = "0.11" @@ -92,7 +101,9 @@ ser_error_derive = { path = "../derives/ser_error_derive" } serialization = { path = "../mm2_bitcoin/serialization" } serialization_derive = { path = "../mm2_bitcoin/serialization_derive" } spv_validation = { path = "../mm2_bitcoin/spv_validation" } -sp-runtime-interface = { version = "6.0.0", default-features = false, features = ["disable_target_static_assertions"] } +sp-runtime-interface = { version = "6.0.0", default-features = false, features = [ + "disable_target_static_assertions", +] } sp-trie = { version = "6.0", default-features = false } trie-db = { version = "0.23.1", default-features = false } trie-root = "0.16.0" @@ -126,7 +137,9 @@ coins_activation = { path = "../coins_activation", features = ["for-tests"] } mm2_test_helpers = { path = "../mm2_test_helpers" } mocktopus = "0.8.0" testcontainers = "0.15.0" -web3 = { git = "https://github.com/KomodoPlatform/rust-web3", tag = "v0.19.0", default-features = false, features = ["http"] } +web3 = { git = "https://github.com/KomodoPlatform/rust-web3", tag = "v0.19.0", default-features = false, features = [ + "http", +] } ethabi = { version = "17.0.0" } [build-dependencies] diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 28e63b3404..e61dba342f 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -42,11 +42,17 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes }, }; - info!("Trying to initialize the SQLite database"); + info!( + "Trying to initialize the SQLite database for {}:db", + db_id.unwrap_or("default") + ); init_db(ctx, db_id)?; migrate_sqlite_database(ctx, 1, db_id).await?; - info!("SQLite database initialization is successful"); + info!( + "SQLite {} database initialization is successful", + db_id.unwrap_or("default") + ); Ok(()) } @@ -145,7 +151,10 @@ async fn 
statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64, db_id: Option<&str>) -> SqlResult<()> { let mut current_migration = current_migration; - info!("migrate_sqlite_database current migration {current_migration}"); + info!( + "{}:db migrate_sqlite_database current migration {current_migration}", + db_id.unwrap_or("default") + ); while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, // so we can't create a transaction outside of this loop. @@ -163,7 +172,10 @@ pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64, db_id: ])?; transaction.commit()?; } - info!("migrate_sqlite_database complete migrated to {current_migration}"); + info!( + "{}:db migrate_sqlite_database complete migrated to {current_migration}", + db_id.unwrap_or("default") + ); Ok(()) } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index a366fa94b0..4f9fc9dcf4 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -30,10 +30,11 @@ use crate::mm2::rpc::spawn_rpc; use bitcrypto::sha256; use coins::register_balance_update_handler; use common::executor::{SpawnFuture, Timer}; -use common::log::{info, warn}; +use common::log::{debug, info, warn}; use crypto::{from_hw_error, CryptoCtx, HwError, HwProcessingError, HwRpcError, WithHwRpcError}; use derive_more::Display; use enum_derives::EnumFromTrait; +use futures::StreamExt; use mm2_core::mm_ctx::{MmArc, MmCtx}; use mm2_err_handle::common_errors::InternalError; use mm2_err_handle::prelude::*; @@ -453,6 +454,44 @@ fn init_wasm_event_streaming(ctx: &MmArc) { } } +#[cfg(not(target_arch = "wasm32"))] +async fn init_db_migration_watcher_loop(ctx: MmArc) { + let db_migration_watcher = &ctx + .init_db_migration_watcher() + .await + .expect("db_migration_watcher initialization failed"); + + let watcher_clone = db_migration_watcher.clone(); + let receiver = db_migration_watcher.get_receiver().await; + let mut guard = receiver.lock().await; + + while let Some(db_id) = guard.next().await { + if watcher_clone.is_db_migrated(Some(&db_id)).await { + debug!("{db_id} migrated, skipping migration.."); + continue; + } + if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), None).await { + common::log::error!("db_migration failed for {db_id}, err: {err:?}"); + continue; + }; + watcher_clone.db_id_migrated(Some(&db_id)).await; + } +} + +#[cfg(not(target_arch = "wasm32"))] +async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, shared_db_id: Option<&str>) -> MmInitResult<()> { + fix_directories(ctx, db_id, shared_db_id)?; + AsyncSqliteConnPool::init(ctx, db_id) + .await + .map_to_mm(MmInitError::ErrorSqliteInitializing)?; + SqliteConnPool::init(ctx, db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + SqliteConnPool::init_shared(ctx, db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + init_and_migrate_sql_db(ctx, db_id).await?; + migrate_db(ctx, db_id)?; + + Ok(()) +} + pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { init_ordermatch_context(&ctx)?; init_p2p(ctx.clone()).await?; @@ -463,14 +502,8 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - fix_directories(&ctx, None, None)?; - AsyncSqliteConnPool::init(&ctx, None) - .await - .map_to_mm(MmInitError::ErrorSqliteInitializing)?; - 
SqliteConnPool::init(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - SqliteConnPool::init_shared(&ctx, None).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - init_and_migrate_sql_db(&ctx, None).await?; - migrate_db(&ctx, None)?; + run_db_migration_impl(&ctx, None, None).await?; + ctx.spawner().spawn(init_db_migration_watcher_loop(ctx.clone())); } init_message_service(&ctx).await?; From 46f87122880a02d15e1e9e965a28f272a5505fe1 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 17 May 2024 01:37:06 +0100 Subject: [PATCH 119/186] handle shared_db_id and minor fixes --- .../src/tendermint_with_assets_activation.rs | 36 +++++++--- mm2src/mm2_core/src/sql_connection_pool.rs | 16 +++-- mm2src/mm2_main/src/lp_native_dex.rs | 12 ++-- mm2src/mm2_test_helpers/src/for_tests.rs | 70 +++++++++---------- 4 files changed, 77 insertions(+), 57 deletions(-) diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index 2b1d0a09af..39fb018269 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -18,10 +18,14 @@ use coins::tendermint::{tendermint_priv_key_policy, TendermintActivationPolicy, #[cfg(not(target_arch = "wasm32"))] use coins::utxo::dhash160; use coins::{CoinBalance, CoinProtocol, MarketCoinOps, MmCoin, MmCoinEnum, PrivKeyBuildPolicy}; use common::executor::{AbortSettings, SpawnAbortable}; -#[cfg(not(target_arch = "wasm32"))] use common::log::debug; +#[cfg(not(target_arch = "wasm32"))] use common::log::info; use common::{true_f, Future01CompatExt}; +#[cfg(not(target_arch = "wasm32"))] +use crypto::shared_db_id::shared_db_id_from_seed; #[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use mm2_core::mm_ctx::MmArc; +#[cfg(not(target_arch = "wasm32"))] +use mm2_core::sql_connection_pool::DbIds; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; use mm2_event_stream::EventStreamConfiguration; @@ -246,18 +250,32 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { }); } + // Send migration request #[cfg(not(target_arch = "wasm32"))] { - // Send migration request - let address_hash = dhash160(&pubkey.to_bytes()); - let address_rmd160_hex = hex::encode(address_hash.as_slice()); + let db_id = hex::encode(dhash160(pubkey.to_bytes().as_slice())); + let shared_db_id = shared_db_id_from_seed(&pubkey.to_hex()) + .mm_err(|err| TendermintInitError { + ticker: ticker.clone(), + kind: TendermintInitErrorKind::Internal(err.to_string()), + })? + .to_string(); + let db_migration_sender = ctx.db_migration_watcher().await.get_sender().await; let mut db_migration_sender = db_migration_sender.lock().await; - if db_migration_sender.send(address_rmd160_hex.clone()).await.is_ok() { - debug!("Sending migration request for db_id: {address_rmd160_hex:?}"); - }; - - // TODO: handle for shared_db_id. 
+ db_migration_sender + .send(DbIds { + db_id: db_id.clone(), + shared_db_id: shared_db_id.clone(), + }) + .await + .map_to_mm(|err| TendermintInitError { + ticker: ticker.clone(), + kind: TendermintInitErrorKind::Internal(err.to_string()), + })?; + + info!("Public key hash: {db_id}"); + info!("Shared Database ID: {shared_db_id}"); } TendermintActivationPolicy::with_public_key(pubkey) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 090fe43fb9..038291f045 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -260,10 +260,14 @@ impl AsyncSqliteConnPool { } } -pub type DbMigrationHandler = Arc>>; -pub type DbMigrationSender = Arc>>; +pub struct DbIds { + pub db_id: String, + pub shared_db_id: String, +} +pub type DbMigrationHandler = Arc>>; +pub type DbMigrationSender = Arc>>; -pub fn create_db_migration_watcher() -> (Sender, Receiver) { +pub fn create_db_migration_watcher() -> (Sender, Receiver) { let (sender, receiver) = channel(1); (sender, receiver) } @@ -292,15 +296,13 @@ impl DbMigrationWatcher { if let Some(db_id) = db_id { let guard = self.migrations.lock().await; if guard.get(db_id).is_some() { - // migration hasn'been been ran for db with this id + // migration has been ran for db with id return true; }; - // migration hasn't been ran for db with this id return false; } - - // migration hasn been when no db id is provided we assume it's the default db id + // migration has been ran when no db id is provided we assume it's the default db id true } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 4f9fc9dcf4..2a389a5f61 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -465,16 +465,16 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { let receiver = db_migration_watcher.get_receiver().await; let mut guard = receiver.lock().await; - while let Some(db_id) = guard.next().await { - if watcher_clone.is_db_migrated(Some(&db_id)).await { - debug!("{db_id} migrated, skipping migration.."); + while let Some(ids) = guard.next().await { + if watcher_clone.is_db_migrated(Some(&ids.db_id)).await { + debug!("{} migrated, skipping migration..", ids.db_id); continue; } - if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), None).await { - common::log::error!("db_migration failed for {db_id}, err: {err:?}"); + if let Err(err) = run_db_migration_impl(&ctx, Some(&ids.db_id), Some(&ids.shared_db_id)).await { + common::log::error!("{err:?}"); continue; }; - watcher_clone.db_id_migrated(Some(&db_id)).await; + watcher_clone.db_id_migrated(Some(&ids.db_id)).await; } } diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 35e53aae14..1ddc413457 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -417,10 +417,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let bob_passphrase = crate::get_passphrase!(".env.seed", "BOB_PASSPHRASE").unwrap(); Mm2TestConf::seednode(&bob_passphrase, coins) - } + }, Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::seednode_with_hd_account(Self::BOB_HD_PASSPHRASE, coins) - } + }, } } @@ -429,10 +429,10 @@ impl Mm2TestConfForSwap { Mm2InitPrivKeyPolicy::Iguana => { let alice_passphrase = crate::get_passphrase!(".env.client", "ALICE_PASSPHRASE").unwrap(); Mm2TestConf::light_node(&alice_passphrase, coins, &[bob_ip]) - } + }, 
Mm2InitPrivKeyPolicy::GlobalHDAccount => { Mm2TestConf::light_node_with_hd_account(Self::ALICE_HD_PASSPHRASE, coins, &[bob_ip]) - } + }, } } } @@ -1119,7 +1119,7 @@ impl RaiiKill { _ => { self.running = false; false - } + }, } } } @@ -1264,7 +1264,7 @@ impl MarketMakerIt { let dir = folder.join("DB"); conf["dbdir"] = dir.to_str().unwrap().into(); dir - } + }, }; try_s!(fs::create_dir(&folder)); @@ -1279,7 +1279,7 @@ impl MarketMakerIt { let path = folder.join("mm2.log"); conf["log"] = path.to_str().unwrap().into(); path - } + }, }; // If `local` is provided @@ -1371,8 +1371,8 @@ impl MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) as u64 / 20 + 10); @@ -1398,8 +1398,8 @@ impl MarketMakerIt { /// after process is stopped #[cfg(not(target_arch = "wasm32"))] pub async fn wait_for_log_after_stop(&self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { use common::try_or_ready_err; @@ -1412,19 +1412,19 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_ms(ms) - .with_timeout_secs(timeout_sec) - .await - .map_err(|e| ERRL!("{:?}", e)) - // Convert `Result, String>` to `Result<(), String>` - .flatten() + .repeat_every_ms(ms) + .with_timeout_secs(timeout_sec) + .await + .map_err(|e| ERRL!("{:?}", e)) + // Convert `Result, String>` to `Result<(), String>` + .flatten() } /// Busy-wait on the instance in-memory log until the `pred` returns `true` or `timeout_sec` expires. #[cfg(target_arch = "wasm32")] pub async fn wait_for_log(&mut self, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, + where + F: Fn(&str) -> bool, { wait_for_log(&self.ctx, timeout_sec, pred).await } @@ -1448,7 +1448,7 @@ impl MarketMakerIt { let body_str = json::to_string(&body).unwrap_or_else(|_| panic!("Response {:?} is not a valid JSON", body)); Ok((status_code, body_str, HeaderMap::new())) - } + }, Err(e) => Ok((StatusCode::INTERNAL_SERVER_ERROR, e, HeaderMap::new())), } } @@ -1514,7 +1514,7 @@ impl MarketMakerIt { } else { return ERR!("{}", err); } - } + }, }; if status != StatusCode::OK { return ERR!("MM didn't accept a stop. body: {}", body); @@ -1537,10 +1537,10 @@ impl MarketMakerIt { } Retry(()) }) - .repeat_every_secs(0.05) - .with_timeout_ms(timeout_ms) - .await - .map_err(|e| ERRL!("{:?}", e)) + .repeat_every_secs(0.05) + .with_timeout_ms(timeout_ms) + .await + .map_err(|e| ERRL!("{:?}", e)) } /// Currently, we cannot wait for the `Completed IAmrelay handling for peer` log entry on WASM node, @@ -1645,8 +1645,8 @@ impl Drop for MarketMakerIt { /// Busy-wait on the log until the `pred` returns `true` or `timeout_sec` expires. pub async fn wait_for_log(ctx: &MmArc, timeout_sec: f64, pred: F) -> Result<(), String> - where - F: Fn(&str) -> bool, +where + F: Fn(&str) -> bool, { let start = now_float(); let ms = 50.min((timeout_sec * 1000.) 
as u64 / 20 + 10); @@ -1746,7 +1746,7 @@ pub fn mm_spat() -> (&'static str, MarketMakerIt, RaiiDump, RaiiDump) { "pass".into(), None, ) - .unwrap(); + .unwrap(); let (dump_log, dump_dashboard) = mm_dump(&mm.log_path); (passphrase, mm, dump_log, dump_dashboard) } @@ -1823,10 +1823,10 @@ pub fn from_env_file(env: Vec) -> (Option, Option) { match cap.get(1) { Some(name) if name.as_bytes() == b"PASSPHRASE" => { passphrase = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - } + }, Some(name) if name.as_bytes() == b"USERPASS" => { userpass = cap.get(2).map(|v| String::from_utf8(v.as_bytes().into()).unwrap()) - } + }, _ => (), } } @@ -2179,7 +2179,7 @@ pub async fn init_lightning_status(mm: &MarketMakerIt, task_id: u64) -> Json { pub fn new_mm2_temp_folder_path(ip: Option) -> PathBuf { let now = common::now_ms(); #[allow(deprecated)] - let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); + let now = Local.timestamp((now / 1000) as i64, (now % 1000) as u32 * 1_000_000); let folder = match ip { Some(ip) => format!("mm2_{}_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f"), ip), None => format!("mm2_{}", now.format("%Y-%m-%d_%H-%M-%S-%3f")), @@ -3283,7 +3283,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; log!("Checking maker status.."); check_my_swap_status( @@ -3292,7 +3292,7 @@ pub async fn wait_for_swaps_finish_and_check_status( BigDecimal::try_from(volume).unwrap(), BigDecimal::try_from(volume * maker_price).unwrap(), ) - .await; + .await; } } @@ -3317,8 +3317,8 @@ pub async fn test_qrc20_history_impl(local_start: Option) { "pass".into(), local_start, ) - .await - .unwrap(); + .await + .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); #[cfg(not(target_arch = "wasm32"))] From 5bde012a71e052b174ef3c07dbe55a113ffd35bc Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 17 May 2024 10:21:46 +0100 Subject: [PATCH 120/186] polish sql_connection_pool and fix review notes --- mm2src/coins/eth.rs | 59 +++-- mm2src/coins/lp_coins.rs | 2 +- mm2src/coins/nft/nft_structs.rs | 5 +- mm2src/mm2_core/src/mm_ctx.rs | 20 +- mm2src/mm2_core/src/sql_connection_pool.rs | 282 ++++++++++----------- 5 files changed, 181 insertions(+), 187 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index af001faced..a57a3c7d18 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -549,16 +549,16 @@ async fn make_gas_station_request(url: &str) -> GasStationResult { impl EthCoinImpl { #[cfg(not(target_arch = "wasm32"))] - fn eth_traces_path(&self, ctx: &MmArc, my_address: Address) -> PathBuf { - ctx.dbdir(None) + fn eth_traces_path(&self, ctx: &MmArc, my_address: Address, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id) .join("TRANSACTIONS") .join(format!("{}_{:#02x}_trace.json", self.ticker, my_address)) } /// Load saved ETH traces from local DB #[cfg(not(target_arch = "wasm32"))] - fn load_saved_traces(&self, ctx: &MmArc, my_address: Address) -> Option { - let content = gstuff::slurp(&self.eth_traces_path(ctx, my_address)); + fn load_saved_traces(&self, ctx: &MmArc, my_address: Address, db_id: Option<&str>) -> Option { + let content = gstuff::slurp(&self.eth_traces_path(ctx, my_address, db_id)); if content.is_empty() { None } else { @@ -571,54 +571,59 @@ impl EthCoinImpl { /// Load saved ETH traces from local DB #[cfg(target_arch = "wasm32")] - fn load_saved_traces(&self, _ctx: &MmArc, _my_address: Address) -> Option { + fn 
load_saved_traces(&self, _ctx: &MmArc, _my_address: Address, _db_id: Option<&str>) -> Option { common::panic_w("'load_saved_traces' is not implemented in WASM"); unreachable!() } /// Store ETH traces to local DB #[cfg(not(target_arch = "wasm32"))] - fn store_eth_traces(&self, ctx: &MmArc, my_address: Address, traces: &SavedTraces) { + fn store_eth_traces(&self, ctx: &MmArc, my_address: Address, traces: &SavedTraces, db_id: Option<&str>) { let content = json::to_vec(traces).unwrap(); - let tmp_file = format!("{}.tmp", self.eth_traces_path(ctx, my_address).display()); + let tmp_file = format!("{}.tmp", self.eth_traces_path(ctx, my_address, db_id).display()); std::fs::write(&tmp_file, content).unwrap(); - std::fs::rename(tmp_file, self.eth_traces_path(ctx, my_address)).unwrap(); + std::fs::rename(tmp_file, self.eth_traces_path(ctx, my_address, db_id)).unwrap(); } /// Store ETH traces to local DB #[cfg(target_arch = "wasm32")] - fn store_eth_traces(&self, _ctx: &MmArc, _my_address: Address, _traces: &SavedTraces) { + fn store_eth_traces(&self, _ctx: &MmArc, _my_address: Address, _traces: &SavedTraces, _db_id: Option<&str>) { common::panic_w("'store_eth_traces' is not implemented in WASM"); unreachable!() } #[cfg(not(target_arch = "wasm32"))] - fn erc20_events_path(&self, ctx: &MmArc, my_address: Address) -> PathBuf { - ctx.dbdir(None) + fn erc20_events_path(&self, ctx: &MmArc, my_address: Address, db_id: Option<&str>) -> PathBuf { + ctx.dbdir(db_id) .join("TRANSACTIONS") .join(format!("{}_{:#02x}_events.json", self.ticker, my_address)) } /// Store ERC20 events to local DB #[cfg(not(target_arch = "wasm32"))] - fn store_erc20_events(&self, ctx: &MmArc, my_address: Address, events: &SavedErc20Events) { + fn store_erc20_events(&self, ctx: &MmArc, my_address: Address, events: &SavedErc20Events, db_id: Option<&str>) { let content = json::to_vec(events).unwrap(); - let tmp_file = format!("{}.tmp", self.erc20_events_path(ctx, my_address).display()); + let tmp_file = format!("{}.tmp", self.erc20_events_path(ctx, my_address, db_id).display()); std::fs::write(&tmp_file, content).unwrap(); - std::fs::rename(tmp_file, self.erc20_events_path(ctx, my_address)).unwrap(); + std::fs::rename(tmp_file, self.erc20_events_path(ctx, my_address, db_id)).unwrap(); } /// Store ERC20 events to local DB #[cfg(target_arch = "wasm32")] - fn store_erc20_events(&self, _ctx: &MmArc, _my_address: Address, _events: &SavedErc20Events) { + fn store_erc20_events(&self, _ctx: &MmArc, _my_address: Address, _events: &SavedErc20Events, _db_id: Option<&str>) { common::panic_w("'store_erc20_events' is not implemented in WASM"); unreachable!() } /// Load saved ERC20 events from local DB #[cfg(not(target_arch = "wasm32"))] - fn load_saved_erc20_events(&self, ctx: &MmArc, my_address: Address) -> Option { - let content = gstuff::slurp(&self.erc20_events_path(ctx, my_address)); + fn load_saved_erc20_events( + &self, + ctx: &MmArc, + my_address: Address, + db_id: Option<&str>, + ) -> Option { + let content = gstuff::slurp(&self.erc20_events_path(ctx, my_address, db_id)); if content.is_empty() { None } else { @@ -631,7 +636,12 @@ impl EthCoinImpl { /// Load saved ERC20 events from local DB #[cfg(target_arch = "wasm32")] - fn load_saved_erc20_events(&self, _ctx: &MmArc, _my_address: Address) -> Option { + fn load_saved_erc20_events( + &self, + _ctx: &MmArc, + _my_address: Address, + _db_id: Option<&str>, + ) -> Option { common::panic_w("'load_saved_erc20_events' is not implemented in WASM"); unreachable!() } @@ -2646,7 +2656,7 @@ impl EthCoin { 
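A note on the pattern above: `eth_traces_path`, `store_eth_traces`, `erc20_events_path` and the related load/store helpers now take an extra `db_id: Option<&str>` so the trace/event JSON files land under the per-account database directory rather than a single global one, and writes still go through a temp file plus rename. The following is a minimal standalone sketch of that pattern, not taken from the patch; `db_dir`, `traces_path` and `store_atomically` are hypothetical stand-ins for `ctx.dbdir(db_id)` and the helpers shown in the diff.

    use std::fs;
    use std::io;
    use std::path::{Path, PathBuf};

    // Hypothetical stand-in for ctx.dbdir(db_id): the per-account directory,
    // falling back to a default id when no explicit db_id is given.
    fn db_dir(db_root: &Path, db_id: Option<&str>, default_id: &str) -> PathBuf {
        db_root.join(db_id.unwrap_or(default_id))
    }

    // Mirrors the "{ticker}_{address}_trace.json" naming used by eth_traces_path.
    fn traces_path(db_root: &Path, db_id: Option<&str>, default_id: &str, ticker: &str, address: &str) -> PathBuf {
        db_dir(db_root, db_id, default_id)
            .join("TRANSACTIONS")
            .join(format!("{}_{}_trace.json", ticker, address))
    }

    // Write-to-temp-then-rename, as in store_eth_traces: on typical platforms the
    // rename swaps the file in a single step, so readers never see a half-written file.
    fn store_atomically(path: &Path, content: &[u8]) -> io::Result<()> {
        let tmp = path.with_extension("json.tmp");
        fs::write(&tmp, content)?;
        fs::rename(&tmp, path)
    }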
}, }; - let mut saved_traces = match self.load_saved_traces(ctx, my_address) { + let mut saved_traces = match self.load_saved_traces(ctx, my_address, self.account_db_id().as_deref()) { Some(traces) => traces, None => SavedTraces { traces: vec![], @@ -2736,7 +2746,7 @@ impl EthCoin { } else { 0.into() }; - self.store_eth_traces(ctx, my_address, &saved_traces); + self.store_eth_traces(ctx, my_address, &saved_traces, self.account_db_id().as_deref()); } if current_block > saved_traces.latest_block { @@ -2792,7 +2802,7 @@ impl EthCoin { saved_traces.traces.extend(to_traces_after_latest); saved_traces.latest_block = current_block; - self.store_eth_traces(ctx, my_address, &saved_traces); + self.store_eth_traces(ctx, my_address, &saved_traces, self.account_db_id().as_deref()); } saved_traces.traces.sort_by(|a, b| b.block_number.cmp(&a.block_number)); for trace in saved_traces.traces { @@ -3013,7 +3023,8 @@ impl EthCoin { }, }; - let mut saved_events = match self.load_saved_erc20_events(ctx, my_address) { + let mut saved_events = match self.load_saved_erc20_events(ctx, my_address, self.account_db_id().as_deref()) + { Some(events) => events, None => SavedErc20Events { events: vec![], @@ -3092,7 +3103,7 @@ impl EthCoin { } else { 0.into() }; - self.store_erc20_events(ctx, my_address, &saved_events); + self.store_erc20_events(ctx, my_address, &saved_events, self.account_db_id().as_deref()); } if current_block > saved_events.latest_block { @@ -3149,7 +3160,7 @@ impl EthCoin { saved_events.events.extend(from_events_after_latest); saved_events.events.extend(to_events_after_latest); saved_events.latest_block = current_block; - self.store_erc20_events(ctx, my_address, &saved_events); + self.store_erc20_events(ctx, my_address, &saved_events, self.account_db_id().as_deref()); } let all_events: HashMap<_, _> = saved_events diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index ed39def503..dff6ad3dbc 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4498,7 +4498,7 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result, #[cfg(not(target_arch = "wasm32"))] pub(crate) nft_cache_dbs: AsyncSqliteConnPool, - #[cfg(not(target_arch = "wasm32"))] - ctx: MmArc, } impl NftCtx { @@ -743,7 +741,6 @@ impl NftCtx { .ok_or("async_sqlite_connection is not initialized".to_owned())?; Ok(NftCtx { nft_cache_dbs: async_sqlite_connection.clone(), - ctx: ctx.clone(), }) }))) } @@ -763,7 +760,7 @@ impl NftCtx { &self, db_id: Option<&str>, ) -> MmResult { - let locked = self.nft_cache_dbs.async_sqlite_conn(&self.ctx, db_id).await; + let locked = self.nft_cache_dbs.async_sqlite_conn(db_id).await; Ok(locked.lock_owned().await) } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 43b5b4fa61..1d70976815 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -359,22 +359,20 @@ impl MmCtx { /// Returns `None` if the connection pool is not initialized. #[cfg(not(target_arch = "wasm32"))] pub fn shared_sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { - if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { - return Some(pool.sqlite_conn_shared(self, db_id)); - }; - - None + self.sqlite_conn_pool + .as_option() + .cloned() + .map(|pool| pool.sqlite_conn_shared(db_id)) } /// Retrieves an optional connection from the pool for the specified database ID. /// Returns `None` if the connection pool is not initialized. 
#[cfg(not(target_arch = "wasm32"))] pub fn sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { - if let Some(pool) = self.sqlite_conn_pool.as_option().cloned() { - return Some(pool.sqlite_conn(self, db_id)); - }; - - None + self.sqlite_conn_pool + .as_option() + .cloned() + .map(|pool| pool.sqlite_conn(db_id)) } /// Obtains a connection from the pool for the specified database ID, panicking if the pool is not initialized. @@ -384,7 +382,7 @@ impl MmCtx { .sqlite_conn_pool .or(&|| panic!("sqlite_connection is not initialized")) .clone(); - pool.sqlite_conn(self, db_id) + pool.sqlite_conn(db_id) } #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 038291f045..ec6a3ef800 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -1,15 +1,18 @@ -use crate::mm_ctx::{log_sqlite_file_open_attempt, MmCtx}; +use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmCtx}; use common::log::error; use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::lock::Mutex as AsyncMutex; use gstuff::try_s; +use primitives::hash::H160; use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; use std::sync::{Arc, Mutex}; pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; const SYNC_SQLITE_DB_ID: &str = "MM2.db"; +const SQLITE_SHARED_DB_ID: &str = "MM2-shared.db"; /// Represents the kind of database connection ID: either shared or single-user. enum DbIdConnKind { @@ -19,7 +22,14 @@ enum DbIdConnKind { /// A pool for managing SQLite connections, where each connection is keyed by a unique string identifier. #[derive(Clone)] -pub struct SqliteConnPool(Arc>>>>); +pub struct SqliteConnPool { + connections: Arc>>>>, + // default db_id + rmd160_hex: String, + // default shared_db_id + shared_db_id: H160, + db_dir: String, +} impl SqliteConnPool { /// Initializes a single-user database connection. @@ -34,23 +44,36 @@ impl SqliteConnPool { /// Internal implementation to initialize a database connection. fn init_impl(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { - let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); - - match ctx.sqlite_conn_pool.as_option() { - // if connection pool is not already initialized, create new connection pool. - None => { - let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); - let store = Arc::new(Mutex::new(HashMap::from([(db_id, conn)]))); - try_s!(ctx.sqlite_conn_pool.pin(Self(store))); - }, - // if connection pool is already initialized, insert new connection. - Some(pool) => { - let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); - let mut pool = pool.0.lock().unwrap(); - pool.insert(db_id, conn); - }, + let db_id_default = match db_id_conn_kind { + DbIdConnKind::Shared => hex::encode(ctx.shared_db_id().as_slice()), + DbIdConnKind::Single => ctx.rmd160_hex(), + }; + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); + + let sqlite_file_path = match db_id_conn_kind { + DbIdConnKind::Shared => ctx.shared_dbdir(Some(&db_id)).join(SQLITE_SHARED_DB_ID), + DbIdConnKind::Single => ctx.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID), }; + // Connection pool is already initialized, insert new connection. 
+ if let Some(pool) = ctx.sqlite_conn_pool.as_option() { + let conn = Self::open_connection(sqlite_file_path); + let mut pool = pool.connections.lock().unwrap(); + pool.insert(db_id, conn); + + return Ok(()); + } + + // Connection pool is not already initialized, create new connection pool. + let conn = Self::open_connection(sqlite_file_path); + let connections = Arc::new(Mutex::new(HashMap::from([(db_id, conn)]))); + try_s!(ctx.sqlite_conn_pool.pin(Self { + connections, + rmd160_hex: ctx.rmd160_hex(), + shared_db_id: *ctx.shared_db_id(), + db_dir: ctx.conf["dbdir"].to_string() + })); + Ok(()) } @@ -62,112 +85,105 @@ impl SqliteConnPool { /// Internal test implementation to initialize a database connection in-memory. fn init_impl_test(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { - let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); - - match ctx.sqlite_conn_pool.as_option() { - None => { - let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - let store = Arc::new(Mutex::new(HashMap::from([(db_id, connection)]))); - try_s!(ctx.sqlite_conn_pool.pin(Self(store))); - }, - Some(pool) => { - let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - let mut pool = pool.0.lock().unwrap(); - pool.insert(db_id, connection); - }, + let db_id_default = match db_id_conn_kind { + DbIdConnKind::Shared => hex::encode(ctx.shared_db_id().as_slice()), + DbIdConnKind::Single => ctx.rmd160_hex(), + }; + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); + + if let Some(pool) = ctx.sqlite_conn_pool.as_option() { + let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); + let mut pool = pool.connections.lock().unwrap(); + pool.insert(db_id, connection); + + return Ok(()); } + + let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); + let connections = Arc::new(Mutex::new(HashMap::from([(db_id, connection)]))); + try_s!(ctx.sqlite_conn_pool.pin(Self { + connections, + rmd160_hex: ctx.rmd160_hex(), + shared_db_id: *ctx.shared_db_id(), + db_dir: ctx.conf["dbdir"].to_string() + })); + Ok(()) } /// Retrieves a single-user connection from the pool. - pub fn sqlite_conn(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { - self.sqlite_conn_impl(ctx, db_id, DbIdConnKind::Single) + pub fn sqlite_conn(&self, db_id: Option<&str>) -> Arc> { + self.sqlite_conn_impl(db_id, DbIdConnKind::Single) } /// Retrieves a shared connection from the pool. - pub fn sqlite_conn_shared(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { - self.sqlite_conn_impl(ctx, db_id, DbIdConnKind::Shared) + pub fn sqlite_conn_shared(&self, db_id: Option<&str>) -> Arc> { + self.sqlite_conn_impl(db_id, DbIdConnKind::Shared) } /// Internal implementation to retrieve or create a connection. - fn sqlite_conn_impl( - &self, - ctx: &MmCtx, - db_id: Option<&str>, - db_id_conn_kind: DbIdConnKind, - ) -> Arc> { - let db_id = Self::db_id(ctx, db_id, &db_id_conn_kind); - let mut connections = self.0.lock().unwrap(); - return if let Some(connection) = connections.get(&db_id) { - connection.clone() - } else { - let conn = Self::open_connection(ctx, &db_id, &db_id_conn_kind); - connections.insert(db_id, conn.clone()); - // TODO: run migration and fix directions - conn - }; - } + fn sqlite_conn_impl(&self, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Arc> { + let mut connections = self.connections.lock().unwrap(); - /// Generates a database ID based on the connection kind and optional database ID. 
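Both `init_impl` and `sqlite_conn_impl` follow the same shape: pick the key (the explicit `db_id`, otherwise the wallet rmd160 or the shared db id hex), then either reuse the connection cached under that key or open `MM2.db` / `MM2-shared.db` in that key's directory and cache it. A reduced sketch of that get-or-open shape, with hypothetical names and a generic connection type standing in for the real rusqlite types:

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    // Hypothetical, reduced pool: connections keyed by a db id string, with a
    // default key used when the caller passes None. `C` stands in for Connection.
    struct KeyedPool<C> {
        connections: Mutex<HashMap<String, Arc<Mutex<C>>>>,
        default_id: String,
    }

    impl<C> KeyedPool<C> {
        fn get_or_open(&self, db_id: Option<&str>, open: impl FnOnce() -> C) -> Arc<Mutex<C>> {
            let key = db_id.unwrap_or(&self.default_id).to_owned();
            let mut map = self.connections.lock().unwrap();
            map.entry(key)
                .or_insert_with(|| Arc::new(Mutex::new(open())))
                .clone()
        }
    }

Using the `entry` API keeps the lookup and the insert under one lock acquisition, which is the main property the pool has to preserve.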
- fn db_id(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: &DbIdConnKind) -> String { let db_id_default = match db_id_conn_kind { - DbIdConnKind::Shared => hex::encode(ctx.shared_db_id().as_slice()), - DbIdConnKind::Single => ctx.rmd160_hex(), + DbIdConnKind::Shared => hex::encode(self.shared_db_id.as_slice()), + DbIdConnKind::Single => self.rmd160_hex.clone(), }; + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); + if let Some(connection) = connections.get(&db_id) { + return Arc::clone(connection); + } - db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default) - } - - /// Opens a database connection based on the database ID and connection kind. - #[cfg(all(test))] - fn open_connection(ctx: &MmCtx, db_id: &str, db_id_conn_kind: &DbIdConnKind) -> Arc> { let sqlite_file_path = match db_id_conn_kind { - DbIdConnKind::Shared => ctx.shared_dbdir(Some(db_id)).join("MM2-shared.db"), - DbIdConnKind::Single => ctx.dbdir(Some(db_id)).join(SYNC_SQLITE_DB_ID), + DbIdConnKind::Shared => self.db_dir(&db_id).join(SQLITE_SHARED_DB_ID), + DbIdConnKind::Single => self.db_dir(&db_id).join(SYNC_SQLITE_DB_ID), }; + let connection = Self::open_connection(sqlite_file_path); + connections.insert(db_id, Arc::clone(&connection)); + connection + } + + /// Opens a database connection based on the database ID and connection kind. + fn open_connection(sqlite_file_path: PathBuf) -> Arc> { log_sqlite_file_open_attempt(&sqlite_file_path); Arc::new(Mutex::new( Connection::open(sqlite_file_path).expect("failed to open db"), )) } - /// Opens a database connection based on the database ID and connection kind. - #[cfg(not(test))] - fn open_connection(ctx: &MmCtx, db_id: &str, db_id_conn_kind: &DbIdConnKind) -> Arc> { - let sqlite_file_path = match db_id_conn_kind { - DbIdConnKind::Shared => ctx.shared_dbdir(Some(db_id)).join("MM2-shared.db"), - DbIdConnKind::Single => ctx.dbdir(Some(db_id)).join(SYNC_SQLITE_DB_ID), - }; - - log_sqlite_file_open_attempt(&sqlite_file_path); - Arc::new(Mutex::new(Connection::open_in_memory().expect("failed to open db"))) - } + fn db_dir(&self, db_id: &str) -> PathBuf { path_to_dbdir(Some(&self.db_dir), db_id) } } /// A pool for managing async SQLite connections, where each connection is keyed by a unique string identifier. #[derive(Clone)] -pub struct AsyncSqliteConnPool(Arc>>>>); +pub struct AsyncSqliteConnPool { + connections: Arc>>>>, + sqlite_file_path: PathBuf, + rmd160_hex: String, +} impl AsyncSqliteConnPool { /// Initialize a database connection. pub async fn init(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); - match ctx.async_sqlite_conn_pool.as_option() { - // if connection pool is not already initialized, create new connection pool. - None => { - let conn = Self::open_connection(ctx, &db_id).await; - let store = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); - try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); - }, - // if connection pool is already initialized, insert new connection. 
- Some(pool) => { - let conn = Self::open_connection(ctx, &db_id).await; - let mut pool = pool.0.lock().await; - pool.insert(db_id, conn); - }, - }; + if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { + let conn = Self::open_connection(&pool.sqlite_file_path).await; + let mut pool = pool.connections.lock().await; + pool.insert(db_id, conn); + + return Ok(()); + } + + let sqlite_file_path = ctx.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); + let conn = Self::open_connection(&sqlite_file_path).await; + let connections = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); + try_s!(ctx.async_sqlite_conn_pool.pin(Self { + connections, + sqlite_file_path, + rmd160_hex: ctx.rmd160_hex(), + })); Ok(()) } @@ -176,70 +192,43 @@ impl AsyncSqliteConnPool { pub async fn init_test(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); - match ctx.async_sqlite_conn_pool.as_option() { - // if connection pool is not already initialized, create new connection pool. - None => { - let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); - // extra connection to test accessing different db test - let conn2 = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); - let connections = HashMap::from([(db_id, conn), ("TEST_DB_ID".to_owned(), conn2)]); - let store = Arc::new(AsyncMutex::new(connections)); - try_s!(ctx.async_sqlite_conn_pool.pin(Self(store))); - }, - // if connection pool is already initialized, insert new connection. - Some(pool) => { - let mut pool = pool.0.lock().await; - let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); - pool.insert(db_id, conn); - }, - }; + if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { + let mut pool = pool.connections.lock().await; + let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); + pool.insert(db_id, conn); - Ok(()) - } + return Ok(()); + } - /// Retrieve or create a connection optionally. - pub async fn async_sqlite_conn_opt( - &self, - ctx: &MmCtx, - db_id: Option<&str>, - ) -> Option>> { - if let Some(connections) = ctx.async_sqlite_conn_pool.as_option() { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); - let mut connections = connections.0.lock().await; - return if let Some(connection) = connections.get(&db_id) { - Some(connection.clone()) - } else { - let conn = Self::open_connection(ctx, &db_id).await; - connections.insert(db_id, conn.clone()); - // TODO: run migration and fix directions - Some(conn) - }; - }; - None + let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); + // extra connection to test accessing different db test + let conn2 = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); + let connections = HashMap::from([(db_id, conn), ("TEST_DB_ID".to_owned(), conn2)]); + let connections = Arc::new(AsyncMutex::new(connections)); + try_s!(ctx.async_sqlite_conn_pool.pin(Self { + connections, + sqlite_file_path: PathBuf::new(), + rmd160_hex: ctx.rmd160_hex(), + })); + Ok(()) } /// Retrieve or create a connection. 
- pub async fn async_sqlite_conn(&self, ctx: &MmCtx, db_id: Option<&str>) -> Arc> { - let mut connections = ctx - .async_sqlite_conn_pool - .or(&|| panic!("async_sqlite_conn_pool is not initialized")) - .0 - .lock() - .await; - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); + pub async fn async_sqlite_conn(&self, db_id: Option<&str>) -> Arc> { + let mut connections = self.connections.lock().await; + let db_id = db_id.unwrap_or(&self.rmd160_hex); - if let Some(connection) = connections.get(&db_id) { - connection.clone() - } else { - let conn = Self::open_connection(ctx, &db_id).await; - connections.insert(db_id, conn.clone()); - // TODO: run migration and fix directions - conn - } + if let Some(connection) = connections.get(db_id) { + return Arc::clone(connection); + }; + + let connection = Self::open_connection(&self.sqlite_file_path).await; + connections.insert(db_id.to_owned(), Arc::clone(&connection)); + connection } pub async fn close_connections(&self) { - let mut connections = self.0.lock().await; + let mut connections = self.connections.lock().await; for (id, connection) in connections.iter_mut() { let mut connection = connection.lock().await; if let Err(e) = connection.close().await { @@ -248,9 +237,8 @@ impl AsyncSqliteConnPool { } } - async fn open_connection(ctx: &MmCtx, db_id: &str) -> Arc> { - let sqlite_file_path = ctx.dbdir(Some(db_id)).join(ASYNC_SQLITE_DB_ID); - log_sqlite_file_open_attempt(&sqlite_file_path); + async fn open_connection(sqlite_file_path: &PathBuf) -> Arc> { + log_sqlite_file_open_attempt(sqlite_file_path); Arc::new(AsyncMutex::new( AsyncConnection::open(sqlite_file_path) From e03638394fc4a71a41277fd2638f80f39459b55b Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 17 May 2024 11:28:21 +0100 Subject: [PATCH 121/186] cargo fmt --- .../tests/mm2_tests/tendermint_tests.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/mm2src/mm2_main/tests/mm2_tests/tendermint_tests.rs b/mm2src/mm2_main/tests/mm2_tests/tendermint_tests.rs index eade48b2c6..6a9bc55d28 100644 --- a/mm2src/mm2_main/tests/mm2_tests/tendermint_tests.rs +++ b/mm2src/mm2_main/tests/mm2_tests/tendermint_tests.rs @@ -657,7 +657,7 @@ mod swap { "IRIS-TEST", &["IRIS-NIMDA", "USDC-IBC-IRIS"], IRIS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_tendermint( @@ -665,7 +665,7 @@ mod swap { "IRIS-TEST", &["IRIS-NIMDA", "USDC-IBC-IRIS"], IRIS_TESTNET_RPC_URLS, - false + false, ))); block_on(trade_base_rel_tendermint( @@ -731,7 +731,7 @@ mod swap { "NUCLEUS-TEST", &[], NUCLEUS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_tendermint( @@ -739,12 +739,12 @@ mod swap { "NUCLEUS-TEST", &[], NUCLEUS_TESTNET_RPC_URLS, - false + false, ))); - dbg!(block_on(enable_electrum(&mm_bob, "DOC", false, DOC_ELECTRUM_ADDRS,))); + dbg!(block_on(enable_electrum(&mm_bob, "DOC", false, DOC_ELECTRUM_ADDRS))); - dbg!(block_on(enable_electrum(&mm_alice, "DOC", false, DOC_ELECTRUM_ADDRS,))); + dbg!(block_on(enable_electrum(&mm_alice, "DOC", false, DOC_ELECTRUM_ADDRS))); block_on(trade_base_rel_tendermint( mm_bob, @@ -809,7 +809,7 @@ mod swap { "NUCLEUS-TEST", &[], NUCLEUS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_tendermint( @@ -817,7 +817,7 @@ mod swap { "NUCLEUS-TEST", &[], NUCLEUS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_electrum(&mm_bob, "DOC", false, DOC_ELECTRUM_ADDRS))); @@ -887,7 +887,7 @@ mod swap { "NUCLEUS-TEST", &["IRIS-IBC-NUCLEUS-TEST"], NUCLEUS_TESTNET_RPC_URLS, - false + false, 
))); dbg!(block_on(enable_tendermint( @@ -895,7 +895,7 @@ mod swap { "NUCLEUS-TEST", &["IRIS-IBC-NUCLEUS-TEST"], NUCLEUS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_electrum(&mm_bob, "DOC", false, DOC_ELECTRUM_ADDRS))); @@ -965,7 +965,7 @@ mod swap { "NUCLEUS-TEST", &["IRIS-IBC-NUCLEUS-TEST"], NUCLEUS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_tendermint( @@ -973,7 +973,7 @@ mod swap { "NUCLEUS-TEST", &["IRIS-IBC-NUCLEUS-TEST"], NUCLEUS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_electrum(&mm_bob, "DOC", false, DOC_ELECTRUM_ADDRS))); @@ -1044,7 +1044,7 @@ mod swap { "IRIS-TEST", &[], IRIS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_tendermint( @@ -1052,7 +1052,7 @@ mod swap { "IRIS-TEST", &[], IRIS_TESTNET_RPC_URLS, - false + false, ))); dbg!(block_on(enable_eth_coin( @@ -1061,7 +1061,7 @@ mod swap { TBNB_URLS, TBNB_SWAP_CONTRACT, None, - false + false, ))); dbg!(block_on(enable_eth_coin( @@ -1070,7 +1070,7 @@ mod swap { TBNB_URLS, TBNB_SWAP_CONTRACT, None, - false + false, ))); block_on(trade_base_rel_tendermint( From fab32e29f33f324015dd7d58ecd429399ff25be8 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 20 May 2024 03:31:11 +0100 Subject: [PATCH 122/186] abstract db migration from tendermint activation fn and fix other review notes --- .../src/tendermint_with_assets_activation.rs | 62 +++++++++++-------- mm2src/mm2_core/src/sql_connection_pool.rs | 41 ++++++------ 2 files changed, 55 insertions(+), 48 deletions(-) diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index 39fb018269..915112ba5a 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -18,7 +18,7 @@ use coins::tendermint::{tendermint_priv_key_policy, TendermintActivationPolicy, #[cfg(not(target_arch = "wasm32"))] use coins::utxo::dhash160; use coins::{CoinBalance, CoinProtocol, MarketCoinOps, MmCoin, MmCoinEnum, PrivKeyBuildPolicy}; use common::executor::{AbortSettings, SpawnAbortable}; -#[cfg(not(target_arch = "wasm32"))] use common::log::info; +#[cfg(not(target_arch = "wasm32"))] use common::log::debug; use common::{true_f, Future01CompatExt}; #[cfg(not(target_arch = "wasm32"))] use crypto::shared_db_id::shared_db_id_from_seed; @@ -250,33 +250,8 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { }); } - // Send migration request #[cfg(not(target_arch = "wasm32"))] - { - let db_id = hex::encode(dhash160(pubkey.to_bytes().as_slice())); - let shared_db_id = shared_db_id_from_seed(&pubkey.to_hex()) - .mm_err(|err| TendermintInitError { - ticker: ticker.clone(), - kind: TendermintInitErrorKind::Internal(err.to_string()), - })? 
- .to_string(); - - let db_migration_sender = ctx.db_migration_watcher().await.get_sender().await; - let mut db_migration_sender = db_migration_sender.lock().await; - db_migration_sender - .send(DbIds { - db_id: db_id.clone(), - shared_db_id: shared_db_id.clone(), - }) - .await - .map_to_mm(|err| TendermintInitError { - ticker: ticker.clone(), - kind: TendermintInitErrorKind::Internal(err.to_string()), - })?; - - info!("Public key hash: {db_id}"); - info!("Shared Database ID: {shared_db_id}"); - } + run_db_migraiton_for_new_tendermint_pubkey(&ctx, pubkey, ticker.clone()).await?; TendermintActivationPolicy::with_public_key(pubkey) } else { @@ -418,3 +393,36 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { unimplemented!() } } + +#[cfg(not(target_arch = "wasm32"))] +async fn run_db_migraiton_for_new_tendermint_pubkey( + ctx: &MmArc, + pubkey: TendermintPublicKey, + ticker: String, +) -> MmResult<(), TendermintInitError> { + let db_id = hex::encode(dhash160(pubkey.to_bytes().as_slice())); + let shared_db_id = shared_db_id_from_seed(&pubkey.to_hex()) + .mm_err(|err| TendermintInitError { + ticker: ticker.to_string(), + kind: TendermintInitErrorKind::Internal(err.to_string()), + })? + .to_string(); + + let db_migration_sender = ctx.db_migration_watcher().await.get_sender().await; + let mut db_migration_sender = db_migration_sender.lock().await; + db_migration_sender + .send(DbIds { + db_id: db_id.clone(), + shared_db_id: shared_db_id.clone(), + }) + .await + .map_to_mm(|err| TendermintInitError { + ticker: ticker.to_string(), + kind: TendermintInitErrorKind::Internal(err.to_string()), + })?; + + debug!("Public key hash: {db_id}"); + debug!("Shared Database ID: {shared_db_id}"); + + Ok(()) +} diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index ec6a3ef800..d806dc97ec 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -1,4 +1,5 @@ use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmCtx}; +use async_std::sync::RwLock as AsyncRwLock; use common::log::error; use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; @@ -8,7 +9,7 @@ use gstuff::try_s; use primitives::hash::H160; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Mutex, RwLock}; pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; const SYNC_SQLITE_DB_ID: &str = "MM2.db"; @@ -23,7 +24,7 @@ enum DbIdConnKind { /// A pool for managing SQLite connections, where each connection is keyed by a unique string identifier. #[derive(Clone)] pub struct SqliteConnPool { - connections: Arc>>>>, + connections: Arc>>>>, // default db_id rmd160_hex: String, // default shared_db_id @@ -58,7 +59,7 @@ impl SqliteConnPool { // Connection pool is already initialized, insert new connection. if let Some(pool) = ctx.sqlite_conn_pool.as_option() { let conn = Self::open_connection(sqlite_file_path); - let mut pool = pool.connections.lock().unwrap(); + let mut pool = pool.connections.write().unwrap(); pool.insert(db_id, conn); return Ok(()); @@ -66,7 +67,7 @@ impl SqliteConnPool { // Connection pool is not already initialized, create new connection pool. 
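For the Tendermint path extracted above, the per-account db id is the hex of `dhash160` over the activation pubkey bytes, and the shared db id is derived from the seed. Assuming `dhash160` is the usual RIPEMD160-over-SHA256 construction, and using the `sha2`, `ripemd` and `hex` crates purely for illustration, the db id half of that derivation looks roughly like this:

    use ripemd::Ripemd160;
    use sha2::{Digest, Sha256};

    // Hypothetical helper: hash160(bytes) = RIPEMD160(SHA256(bytes)), hex-encoded,
    // which is the value the patch feeds into DbIds { db_id, .. }.
    fn account_db_id_from_pubkey(pubkey_bytes: &[u8]) -> String {
        let sha = Sha256::digest(pubkey_bytes);
        let rmd = Ripemd160::digest(sha);
        hex::encode(rmd)
    }

The `shared_db_id_from_seed` half is left out here since its derivation is not shown in this patch.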
let conn = Self::open_connection(sqlite_file_path); - let connections = Arc::new(Mutex::new(HashMap::from([(db_id, conn)]))); + let connections = Arc::new(RwLock::new(HashMap::from([(db_id, conn)]))); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, rmd160_hex: ctx.rmd160_hex(), @@ -93,14 +94,14 @@ impl SqliteConnPool { if let Some(pool) = ctx.sqlite_conn_pool.as_option() { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - let mut pool = pool.connections.lock().unwrap(); + let mut pool = pool.connections.write().unwrap(); pool.insert(db_id, connection); return Ok(()); } let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); - let connections = Arc::new(Mutex::new(HashMap::from([(db_id, connection)]))); + let connections = Arc::new(RwLock::new(HashMap::from([(db_id, connection)]))); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, rmd160_hex: ctx.rmd160_hex(), @@ -123,17 +124,19 @@ impl SqliteConnPool { /// Internal implementation to retrieve or create a connection. fn sqlite_conn_impl(&self, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Arc> { - let mut connections = self.connections.lock().unwrap(); - let db_id_default = match db_id_conn_kind { DbIdConnKind::Shared => hex::encode(self.shared_db_id.as_slice()), DbIdConnKind::Single => self.rmd160_hex.clone(), }; let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); + + let connections = self.connections.read().unwrap(); if let Some(connection) = connections.get(&db_id) { return Arc::clone(connection); } + drop(connections); + let mut connections = self.connections.write().unwrap(); let sqlite_file_path = match db_id_conn_kind { DbIdConnKind::Shared => self.db_dir(&db_id).join(SQLITE_SHARED_DB_ID), DbIdConnKind::Single => self.db_dir(&db_id).join(SYNC_SQLITE_DB_ID), @@ -158,7 +161,7 @@ impl SqliteConnPool { /// A pool for managing async SQLite connections, where each connection is keyed by a unique string identifier. 
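This patch switches the pool maps from a plain mutex to a read-write lock and looks connections up under a read guard first, only taking the write guard when the key is missing. One caveat of that shape: between dropping the read guard and acquiring the write guard another caller can insert the same key, so re-checking (or using the `entry` API) under the write lock avoids opening the same database twice. A small sketch of that defensive variant, with hypothetical names:

    use std::collections::HashMap;
    use std::sync::RwLock;

    // Hypothetical cache showing the read-then-write lookup with a re-check.
    fn get_or_insert<V: Clone>(
        cache: &RwLock<HashMap<String, V>>,
        key: &str,
        make: impl FnOnce() -> V,
    ) -> V {
        if let Some(v) = cache.read().unwrap().get(key) {
            return v.clone();
        }
        // Another thread may have inserted the key between the read guard being
        // dropped and the write guard being taken, so check again via `entry`.
        let mut map = cache.write().unwrap();
        map.entry(key.to_owned()).or_insert_with(make).clone()
    }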
#[derive(Clone)] pub struct AsyncSqliteConnPool { - connections: Arc>>>>, + connections: Arc>>>>, sqlite_file_path: PathBuf, rmd160_hex: String, } @@ -170,7 +173,7 @@ impl AsyncSqliteConnPool { if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { let conn = Self::open_connection(&pool.sqlite_file_path).await; - let mut pool = pool.connections.lock().await; + let mut pool = pool.connections.write().await; pool.insert(db_id, conn); return Ok(()); @@ -178,7 +181,7 @@ impl AsyncSqliteConnPool { let sqlite_file_path = ctx.dbdir(Some(&db_id)).join(ASYNC_SQLITE_DB_ID); let conn = Self::open_connection(&sqlite_file_path).await; - let connections = Arc::new(AsyncMutex::new(HashMap::from([(db_id, conn)]))); + let connections = Arc::new(AsyncRwLock::new(HashMap::from([(db_id, conn)]))); try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path, @@ -193,7 +196,7 @@ impl AsyncSqliteConnPool { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { - let mut pool = pool.connections.lock().await; + let mut pool = pool.connections.write().await; let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); pool.insert(db_id, conn); @@ -204,7 +207,7 @@ impl AsyncSqliteConnPool { // extra connection to test accessing different db test let conn2 = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); let connections = HashMap::from([(db_id, conn), ("TEST_DB_ID".to_owned(), conn2)]); - let connections = Arc::new(AsyncMutex::new(connections)); + let connections = Arc::new(AsyncRwLock::new(connections)); try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path: PathBuf::new(), @@ -215,20 +218,21 @@ impl AsyncSqliteConnPool { /// Retrieve or create a connection. 
pub async fn async_sqlite_conn(&self, db_id: Option<&str>) -> Arc> { - let mut connections = self.connections.lock().await; let db_id = db_id.unwrap_or(&self.rmd160_hex); + let connections = self.connections.read().await; if let Some(connection) = connections.get(db_id) { return Arc::clone(connection); }; + let mut connections = self.connections.write().await; let connection = Self::open_connection(&self.sqlite_file_path).await; connections.insert(db_id.to_owned(), Arc::clone(&connection)); connection } pub async fn close_connections(&self) { - let mut connections = self.connections.lock().await; + let mut connections = self.connections.write().await; for (id, connection) in connections.iter_mut() { let mut connection = connection.lock().await; if let Err(e) = connection.close().await { @@ -255,11 +259,6 @@ pub struct DbIds { pub type DbMigrationHandler = Arc>>; pub type DbMigrationSender = Arc>>; -pub fn create_db_migration_watcher() -> (Sender, Receiver) { - let (sender, receiver) = channel(1); - (sender, receiver) -} - pub struct DbMigrationWatcher { migrations: Arc>>, sender: DbMigrationSender, @@ -268,7 +267,7 @@ pub struct DbMigrationWatcher { impl DbMigrationWatcher { pub async fn init(ctx: &MmCtx) -> Result, String> { - let (sender, receiver) = create_db_migration_watcher(); + let (sender, receiver) = channel(1); let selfi = Arc::new(Self { migrations: Default::default(), From 789ce8d1d63a4c34bf47e1e81102e293e66e0d4f Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 20 May 2024 05:19:44 +0100 Subject: [PATCH 123/186] minor changes to DbMigrationWatcher --- mm2src/mm2_core/src/sql_connection_pool.rs | 16 +++++++--------- mm2src/mm2_main/src/lp_native_dex.rs | 11 +++++------ 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index d806dc97ec..7585813ce2 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -279,27 +279,25 @@ impl DbMigrationWatcher { Ok(selfi) } - pub async fn is_db_migrated(&self, db_id: Option<&str>) -> bool { + /// This function verifies if a migration has already been executed for the provided + /// db_id. If the migration has not been run for the given db_id, it adds + /// the `db_id` to the list of migrated databases and returns `false`. If no db_id is provided, + /// it assumes that the migration has been run for the default db_id. 
+    pub async fn check_db_id_is_migrated(&self, db_id: Option<&str>) -> bool {
         if let Some(db_id) = db_id {
-            let guard = self.migrations.lock().await;
+            let mut guard = self.migrations.lock().await;
             if guard.get(db_id).is_some() {
                 // migration has been ran for db with id
                 return true;
             };
             // migration hasn't been ran for db with this id
+            guard.insert(db_id.to_owned());
             return false;
         }
         // migration has been ran when no db id is provided we assume it's the default db id
         true
     }
 
-    pub async fn db_id_migrated(&self, db_id: Option<&str>) {
-        if let Some(db_id) = db_id {
-            let mut guard = self.migrations.lock().await;
-            guard.insert(db_id.to_owned());
-        }
-    }
-
     pub async fn get_receiver(&self) -> DbMigrationHandler { self.receiver.clone() }
 
     pub async fn get_sender(&self) -> DbMigrationSender { self.sender.clone() }
diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs
index 2a389a5f61..26cbbe18fc 100644
--- a/mm2src/mm2_main/src/lp_native_dex.rs
+++ b/mm2src/mm2_main/src/lp_native_dex.rs
@@ -30,7 +30,7 @@ use crate::mm2::rpc::spawn_rpc;
 use bitcrypto::sha256;
 use coins::register_balance_update_handler;
 use common::executor::{SpawnFuture, Timer};
-use common::log::{debug, info, warn};
+use common::log::{debug, error, info, warn};
 use crypto::{from_hw_error, CryptoCtx, HwError, HwProcessingError, HwRpcError, WithHwRpcError};
 use derive_more::Display;
 use enum_derives::EnumFromTrait;
@@ -68,10 +68,10 @@ cfg_native! {
     #[path = "lp_init/init_hw.rs"] pub mod init_hw;
 
 cfg_wasm32! {
-    use mm2_net::wasm_event_stream::handle_worker_stream;
-
     #[path = "lp_init/init_metamask.rs"]
     pub mod init_metamask;
+
+    use mm2_net::wasm_event_stream::handle_worker_stream;
 }
 
 const DEFAULT_NETID_SEEDNODES: &[SeedNodeInfo] = &[
@@ -466,15 +466,14 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) {
     let mut guard = receiver.lock().await;
 
     while let Some(ids) = guard.next().await {
-        if watcher_clone.is_db_migrated(Some(&ids.db_id)).await {
+        if watcher_clone.check_db_id_is_migrated(Some(&ids.db_id)).await {
             debug!("{} migrated, skipping migration..", ids.db_id);
             continue;
         }
 
         if let Err(err) = run_db_migration_impl(&ctx, Some(&ids.db_id), Some(&ids.shared_db_id)).await {
-            common::log::error!("{err:?}");
+            error!("{err:?}");
             continue;
         };
-        watcher_clone.db_id_migrated(Some(&ids.db_id)).await;
     }
 }
 
From ac5cc96d7d0b7713b9ac16253d6a20d52f71a8c1 Mon Sep 17 00:00:00 2001
From: Samuel Onoja
Date: Mon, 20 May 2024 12:37:55 +0100
Subject: [PATCH 124/186] update coins_needed_for_kick_start for new db activation

---
 mm2src/mm2_main/src/lp_native_dex.rs | 11 +++++++++--
 mm2src/mm2_main/src/lp_ordermatch.rs |  7 +++----
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs
index 26cbbe18fc..c33e24ec79 100644
--- a/mm2src/mm2_main/src/lp_native_dex.rs
+++ b/mm2src/mm2_main/src/lp_native_dex.rs
@@ -470,10 +470,16 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) {
             debug!("{} migrated, skipping migration..", ids.db_id);
             continue;
         }
+        // run db migration for this db_id if the newly activated pubkey is unique.
         if let Err(err) = run_db_migration_impl(&ctx, Some(&ids.db_id), Some(&ids.shared_db_id)).await {
             error!("{err:?}");
             continue;
         };
+        // Fetch and extend ctx.coins_needed_for_kick_start from the newly initialized db.
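Taken together, patches 122 through 124 turn db migration into a small pipeline: activation code derives the ids for a newly seen pubkey and sends them over a bounded `channel(1)`, and the loop above deduplicates by db id, runs the migration, then extends the kick-start coin list. A compressed sketch of that flow, with hypothetical types standing in for `DbIds` and for the real migration and kick-start calls:

    use futures::channel::mpsc::{channel, Receiver, Sender};
    use futures::{SinkExt, StreamExt};
    use std::collections::HashSet;

    // Hypothetical message mirroring DbIds.
    #[derive(Clone)]
    struct NewDbId {
        db_id: String,
        shared_db_id: String,
    }

    // Bounded to one entry, matching the channel(1) in DbMigrationWatcher::init.
    fn make_watcher_channel() -> (Sender<NewDbId>, Receiver<NewDbId>) {
        channel(1)
    }

    // Producer side: activation code announces the ids derived from a new pubkey.
    async fn notify_new_pubkey(sender: &mut Sender<NewDbId>, ids: NewDbId) -> Result<(), String> {
        sender.send(ids).await.map_err(|e| e.to_string())
    }

    // Consumer side: deduplicate by db_id, then run the per-id work (migration
    // followed by the kick-start bookkeeping in the real loop).
    async fn watcher_loop<F, Fut>(mut receiver: Receiver<NewDbId>, mut run_for_id: F)
    where
        F: FnMut(NewDbId) -> Fut,
        Fut: std::future::Future<Output = ()>,
    {
        let mut migrated: HashSet<String> = HashSet::new();
        while let Some(ids) = receiver.next().await {
            if !migrated.insert(ids.db_id.clone()) {
                continue; // this db_id was already handled
            }
            run_for_id(ids).await;
        }
    }

In the real code the receiver sits behind an async mutex (`DbMigrationHandler`) so a single consumer drains it, and the dedup set lives inside `DbMigrationWatcher` via `check_db_id_is_migrated`.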
+ if let Err(err) = kick_start(ctx.clone(), Some(&ids.db_id)).await { + error!("{err:?}"); + continue; + }; } } @@ -578,7 +584,7 @@ async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { .await .map_to_mm(MmInitError::SwapsKickStartError)?; coins_needed_for_kick_start.extend( - orders_kick_start(&ctx) + orders_kick_start(&ctx, db_id) .await .map_to_mm(MmInitError::OrdersKickStartError)?, ); @@ -586,7 +592,8 @@ async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { .coins_needed_for_kick_start .lock() .map_to_mm(|poison| MmInitError::Internal(poison.to_string()))?; - *lock = coins_needed_for_kick_start; + // extend existing coins list needed for kickstart so even there's a new pubkey activation, the coins will added + *lock.extend(coins_needed_for_kick_start); Ok(()) } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index b3470fc7b3..0f4735cac6 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -5416,12 +5416,11 @@ pub struct HistoricalOrder { conf_settings: Option, } -pub async fn orders_kick_start(ctx: &MmArc) -> Result, String> { +pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(ctx)); - let db_id: Option = None; // TODO let storage = MyOrdersStorage::new(ctx.clone()); - let saved_maker_orders = try_s!(storage.load_active_maker_orders(db_id.as_deref()).await); - let saved_taker_orders = try_s!(storage.load_active_taker_orders(db_id.as_deref()).await); + let saved_maker_orders = try_s!(storage.load_active_maker_orders(db_id).await); + let saved_taker_orders = try_s!(storage.load_active_taker_orders(db_id).await); let mut coins = HashSet::with_capacity((saved_maker_orders.len() * 2) + (saved_taker_orders.len() * 2)); { From d8caebd84536160ddfe78892417639ad7e156f42 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 20 May 2024 12:39:04 +0100 Subject: [PATCH 125/186] minor fix --- mm2src/mm2_main/src/lp_native_dex.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index c33e24ec79..5294312047 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -593,7 +593,7 @@ async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { .lock() .map_to_mm(|poison| MmInitError::Internal(poison.to_string()))?; // extend existing coins list needed for kickstart so even there's a new pubkey activation, the coins will added - *lock.extend(coins_needed_for_kick_start); + lock.extend(coins_needed_for_kick_start); Ok(()) } From 76efdf3dcb0a73b7918d5cebc570ff32078a2d51 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 21 May 2024 06:49:57 +0100 Subject: [PATCH 126/186] comment out gui_storage_dispatcher and related --- mm2src/mm2_gui_storage/src/account/mod.rs | 3 + .../src/account/storage/mod.rs | 2 + mm2src/mm2_gui_storage/src/context.rs | 2 +- mm2src/mm2_gui_storage/src/rpc_commands.rs | 771 +++++++++--------- .../mm2_main/src/rpc/dispatcher/dispatcher.rs | 63 +- 5 files changed, 418 insertions(+), 423 deletions(-) diff --git a/mm2src/mm2_gui_storage/src/account/mod.rs b/mm2src/mm2_gui_storage/src/account/mod.rs index 4350b0c939..70dd845c07 100644 --- a/mm2src/mm2_gui_storage/src/account/mod.rs +++ b/mm2src/mm2_gui_storage/src/account/mod.rs @@ -8,8 +8,11 @@ use std::collections::BTreeSet; pub(crate) mod storage; +#[cfg(not(target_arch = 
"wasm32"))] pub const MAX_ACCOUNT_NAME_LENGTH: usize = 255; +#[cfg(not(target_arch = "wasm32"))] pub const MAX_ACCOUNT_DESCRIPTION_LENGTH: usize = 600; +#[cfg(not(target_arch = "wasm32"))] pub const MAX_TICKER_LENGTH: usize = 255; pub(crate) type HwPubkey = H160Json; diff --git a/mm2src/mm2_gui_storage/src/account/storage/mod.rs b/mm2src/mm2_gui_storage/src/account/storage/mod.rs index 73deb9a698..b288a9a4b0 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/mod.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/mod.rs @@ -19,6 +19,7 @@ const DEFAULT_DEVICE_PUB: HwPubkey = HwPubkey::const_default(); pub(crate) type AccountStorageBoxed = Box; pub type AccountStorageResult = MmResult; +#[allow(unused)] #[derive(Debug, Display)] pub enum AccountStorageError { #[display(fmt = "No such account {:?}", _0)] @@ -126,6 +127,7 @@ pub(crate) struct AccountStorageBuilder<'a> { db_id: Option<&'a str>, } +#[allow(unused)] impl<'a> AccountStorageBuilder<'a> { pub fn new(ctx: &'a MmArc, db_id: Option<&'a str>) -> Self { AccountStorageBuilder { ctx, db_id } } diff --git a/mm2src/mm2_gui_storage/src/context.rs b/mm2src/mm2_gui_storage/src/context.rs index 66914f7e09..55889101ff 100644 --- a/mm2src/mm2_gui_storage/src/context.rs +++ b/mm2src/mm2_gui_storage/src/context.rs @@ -8,9 +8,9 @@ pub(crate) struct AccountContext { db_id: Option, } +#[allow(unused)] impl AccountContext { /// Obtains a reference to this crate context, creating it if necessary. - /// TODO: this is only create/intiliaze once..need to find a way to manage multiple account contexts pub(crate) fn from_ctx(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { from_ctx(&ctx.account_ctx, move || { Ok(AccountContext { diff --git a/mm2src/mm2_gui_storage/src/rpc_commands.rs b/mm2src/mm2_gui_storage/src/rpc_commands.rs index b5b6c93fff..2deb62be24 100644 --- a/mm2src/mm2_gui_storage/src/rpc_commands.rs +++ b/mm2src/mm2_gui_storage/src/rpc_commands.rs @@ -1,391 +1,380 @@ -use crate::account::storage::AccountStorageError; -use crate::account::{AccountId, AccountInfo, AccountWithCoins, AccountWithEnabledFlag, EnabledAccountId, - MAX_ACCOUNT_DESCRIPTION_LENGTH, MAX_ACCOUNT_NAME_LENGTH, MAX_TICKER_LENGTH}; -use crate::context::AccountContext; -use common::{HttpStatusCode, StatusCode, SuccessResponse}; -use derive_more::Display; -use mm2_core::mm_ctx::MmArc; -use mm2_err_handle::prelude::*; -use mm2_number::BigDecimal; -use ser_error_derive::SerializeErrorType; -use serde::{Deserialize, Serialize}; -use std::collections::BTreeSet; - -#[derive(Display, Serialize, SerializeErrorType)] -#[serde(tag = "error_type", content = "error_data")] -pub enum AccountRpcError { - #[display(fmt = "Account name is too long, expected shorter or equal to {}", max_len)] - NameTooLong { max_len: usize }, - #[display(fmt = "Account description is too long, expected shorter or equal to {}", max_len)] - DescriptionTooLong { max_len: usize }, - #[display(fmt = "Coin ticker is too long, expected shorter or equal to {}", max_len)] - TickerTooLong { max_len: usize }, - #[display(fmt = "No such account {:?}", _0)] - NoSuchAccount(AccountId), - #[display(fmt = "No enabled account yet. 
Consider using 'enable_account' RPC")] - NoEnabledAccount, - #[display(fmt = "Account {:?} exists already", _0)] - AccountExistsAlready(AccountId), - #[display(fmt = "Error loading account: {}", _0)] - ErrorLoadingAccount(String), - #[display(fmt = "Error saving changes in accounts storage: {}", _0)] - ErrorSavingAccount(String), - #[display(fmt = "Internal error: {}", _0)] - Internal(String), -} - -impl From for AccountRpcError { - fn from(e: AccountStorageError) -> Self { - match e { - AccountStorageError::NoSuchAccount(account_id) => AccountRpcError::NoSuchAccount(account_id), - AccountStorageError::NoEnabledAccount => AccountRpcError::NoEnabledAccount, - AccountStorageError::AccountExistsAlready(account_id) => AccountRpcError::AccountExistsAlready(account_id), - AccountStorageError::ErrorDeserializing(e) | AccountStorageError::ErrorLoading(e) => { - AccountRpcError::ErrorLoadingAccount(e) - }, - AccountStorageError::ErrorSaving(e) | AccountStorageError::ErrorSerializing(e) => { - AccountRpcError::ErrorSavingAccount(e) - }, - AccountStorageError::Internal(internal) => AccountRpcError::Internal(internal), - } - } -} - -impl HttpStatusCode for AccountRpcError { - fn status_code(&self) -> StatusCode { - match self { - AccountRpcError::NameTooLong { .. } - | AccountRpcError::DescriptionTooLong { .. } - | AccountRpcError::TickerTooLong { .. } - | AccountRpcError::NoSuchAccount(_) - | AccountRpcError::NoEnabledAccount - | AccountRpcError::AccountExistsAlready(_) => StatusCode::BAD_REQUEST, - AccountRpcError::ErrorLoadingAccount(_) - | AccountRpcError::ErrorSavingAccount(_) - | AccountRpcError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR, - } - } -} - -#[derive(Deserialize)] -pub struct NewAccount { - account_id: Id, - name: String, - #[serde(default)] - description: String, - #[serde(default)] - balance_usd: BigDecimal, -} - -impl From> for AccountInfo -where - AccountId: From, -{ - fn from(orig: NewAccount) -> Self { - AccountInfo { - account_id: AccountId::from(orig.account_id), - name: orig.name, - description: orig.description, - balance_usd: orig.balance_usd, - } - } -} - -#[derive(Deserialize)] -pub struct EnableAccountRequest { - #[serde(flatten)] - policy: EnableAccountPolicy, -} - -#[derive(Deserialize)] -#[serde(tag = "policy")] -#[serde(rename_all = "snake_case")] -pub enum EnableAccountPolicy { - Existing(EnabledAccountId), - New(NewAccount), -} - -#[derive(Deserialize)] -pub struct AddAccountRequest { - #[serde(flatten)] - account: NewAccount, -} - -#[derive(Deserialize)] -pub struct DeleteAccountRequest { - account_id: AccountId, -} - -#[derive(Deserialize)] -pub struct SetAccountNameRequest { - account_id: AccountId, - name: String, -} - -#[derive(Deserialize)] -pub struct SetAccountDescriptionRequest { - account_id: AccountId, - description: String, -} - -#[derive(Deserialize)] -pub struct CoinRequest { - account_id: AccountId, - tickers: Vec, -} - -#[derive(Deserialize)] -pub struct GetAccountsRequest; - -#[derive(Deserialize)] -pub struct GetAccountCoinsRequest { - account_id: AccountId, -} - -#[derive(Serialize)] -pub struct GetAccountCoinsResponse { - account_id: AccountId, - coins: BTreeSet, -} - -#[derive(Deserialize)] -pub struct GetEnabledAccountRequest; - -#[derive(Deserialize)] -pub struct SetBalanceRequest { - account_id: AccountId, - balance_usd: BigDecimal, -} - -/// Sets the given account as an enabled (current active account). 
-/// The behaviour depends on [`EnableAccountRequest::policy`]: -/// * [`EnableAccountPolicy::Known`] => -/// 1) Checks whether the given account exists in the storage. -/// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the given `AccountId`; -/// 2) Sets the account as an enabled. -/// * [`EnableAccountPolicy::New`] => -/// 1) Tries to upload the given account info to the storage. -/// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already; -/// 2) Sets the account as an enabled. -/// -/// # Important -/// -/// This RPC affects the storage **only**. It doesn't affect MarketMaker. -pub async fn enable_account(ctx: MmArc, req: EnableAccountRequest) -> MmResult { - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - let account_id = match req.policy { - EnableAccountPolicy::Existing(account_id) => account_id, - EnableAccountPolicy::New(new_account) => { - let account_id = new_account.account_id; - account_ctx - .storage() - .await? - .upload_account(AccountInfo::from(new_account)) - .await?; - account_id - }, - }; - account_ctx.storage().await?.enable_account(account_id).await?; - Ok(SuccessResponse::new()) -} - -/// Adds the given [`AddAccountRequest::account`] to the storage. -/// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already. -/// -/// # Important -/// -/// This RPC affects the storage **only**. It doesn't affect MarketMaker. -pub async fn add_account(ctx: MmArc, req: AddAccountRequest) -> MmResult { - validate_new_account(&req.account)?; - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - account_ctx - .storage() - .await? - .upload_account(AccountInfo::from(req.account)) - .await?; - Ok(SuccessResponse::new()) -} - -/// Deletes the given [`AddAccountRequest::account_id`] account from the storage. -/// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the same `AccountId`. -/// -/// # Important -/// -/// This RPC affects the storage **only**. It doesn't affect MarketMaker. -pub async fn delete_account(ctx: MmArc, req: DeleteAccountRequest) -> MmResult { - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - account_ctx.storage().await?.delete_account(req.account_id).await?; - Ok(SuccessResponse::new()) -} - -/// Loads accounts from the storage and marks one account as enabled **only**. -/// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. -/// -/// # Note -/// -/// The returned accounts are sorted by `AccountId`. -pub async fn get_accounts( - ctx: MmArc, - _req: GetAccountsRequest, -) -> MmResult, AccountRpcError> { - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - let accounts = account_ctx - .storage() - .await? - .load_accounts_with_enabled_flag() - .await? - // The given `BTreeMap` accounts are sorted by `AccountId`. - .into_values() - .collect(); - Ok(accounts) -} - -/// Loads activated coins of the given `account_id` from the storage. -/// -/// # Note -/// -/// The returned coins are sorted. 
-pub async fn get_account_coins( - ctx: MmArc, - req: GetAccountCoinsRequest, -) -> MmResult { - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - let coins = account_ctx - .storage() - .await? - .load_account_coins(req.account_id.clone()) - .await?; - Ok(GetAccountCoinsResponse { - account_id: req.account_id, - coins, - }) -} - -/// Loads an enabled account with activated coins from the storage. -/// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. -/// -/// # Note -/// -/// The account coins are sorted. -pub async fn get_enabled_account( - ctx: MmArc, - _req: GetEnabledAccountRequest, -) -> MmResult { - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - let account = account_ctx.storage().await?.load_enabled_account_with_coins().await?; - Ok(account) -} - -/// Sets the account name. -pub async fn set_account_name(ctx: MmArc, req: SetAccountNameRequest) -> MmResult { - validate_account_name(&req.name)?; - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - account_ctx.storage().await?.set_name(req.account_id, req.name).await?; - Ok(SuccessResponse::new()) -} - -/// Sets the account description. -pub async fn set_account_description( - ctx: MmArc, - req: SetAccountDescriptionRequest, -) -> MmResult { - validate_account_desc(&req.description)?; - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - account_ctx - .storage() - .await? - .set_description(req.account_id, req.description) - .await?; - Ok(SuccessResponse::new()) -} - -/// Sets the account USD balance. -/// -/// # Important -/// -/// This RPC affects the storage **only**. It doesn't affect MarketMaker. -pub async fn set_account_balance(ctx: MmArc, req: SetBalanceRequest) -> MmResult { - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - account_ctx - .storage() - .await? - .set_balance(req.account_id, req.balance_usd) - .await?; - Ok(SuccessResponse::new()) -} - -/// Activates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. -/// -/// # Important -/// -/// This RPC affects the storage **only**. It doesn't affect MarketMaker. -pub async fn activate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { - validate_tickers(&req.tickers)?; - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - account_ctx - .storage() - .await? - .activate_coins(req.account_id, req.tickers) - .await?; - Ok(SuccessResponse::new()) -} - -/// Deactivates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. -/// -/// # Important -/// -/// This RPC affects the storage **only**. It doesn't affect MarketMaker. -pub async fn deactivate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { - // TODO db_id - let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; - account_ctx - .storage() - .await? 
- .deactivate_coins(req.account_id, req.tickers) - .await?; - Ok(SuccessResponse::new()) -} - -fn validate_new_account(account: &NewAccount) -> MmResult<(), AccountRpcError> { - validate_account_name(&account.name)?; - validate_account_desc(&account.description) -} - -fn validate_account_name(name: &str) -> MmResult<(), AccountRpcError> { - if name.len() > MAX_ACCOUNT_NAME_LENGTH { - return MmError::err(AccountRpcError::NameTooLong { - max_len: MAX_ACCOUNT_NAME_LENGTH, - }); - } - Ok(()) -} - -fn validate_account_desc(description: &str) -> MmResult<(), AccountRpcError> { - if description.len() > MAX_ACCOUNT_DESCRIPTION_LENGTH { - return MmError::err(AccountRpcError::DescriptionTooLong { - max_len: MAX_ACCOUNT_NAME_LENGTH, - }); - } - Ok(()) -} - -fn validate_tickers(tickers: &[String]) -> MmResult<(), AccountRpcError> { - for ticker in tickers { - if ticker.len() > MAX_TICKER_LENGTH { - return MmError::err(AccountRpcError::TickerTooLong { - max_len: MAX_TICKER_LENGTH, - }); - } - } - Ok(()) -} +// use crate::account::storage::AccountStorageError; +// use crate::account::{AccountId, AccountInfo, AccountWithCoins, AccountWithEnabledFlag, EnabledAccountId, +// MAX_ACCOUNT_DESCRIPTION_LENGTH, MAX_ACCOUNT_NAME_LENGTH, MAX_TICKER_LENGTH}; +// use crate::context::AccountContext; +// use common::{HttpStatusCode, StatusCode, SuccessResponse}; +// use derive_more::Display; +// use mm2_core::mm_ctx::MmArc; +// use mm2_err_handle::prelude::*; +// use mm2_number::BigDecimal; +// use ser_error_derive::SerializeErrorType; +// use serde::{Deserialize, Serialize}; +// use std::collections::BTreeSet; +// +// #[derive(Display, Serialize, SerializeErrorType)] +// #[serde(tag = "error_type", content = "error_data")] +// pub enum AccountRpcError { +// #[display(fmt = "Account name is too long, expected shorter or equal to {}", max_len)] +// NameTooLong { max_len: usize }, +// #[display(fmt = "Account description is too long, expected shorter or equal to {}", max_len)] +// DescriptionTooLong { max_len: usize }, +// #[display(fmt = "Coin ticker is too long, expected shorter or equal to {}", max_len)] +// TickerTooLong { max_len: usize }, +// #[display(fmt = "No such account {:?}", _0)] +// NoSuchAccount(AccountId), +// #[display(fmt = "No enabled account yet. 
Consider using 'enable_account' RPC")] +// NoEnabledAccount, +// #[display(fmt = "Account {:?} exists already", _0)] +// AccountExistsAlready(AccountId), +// #[display(fmt = "Error loading account: {}", _0)] +// ErrorLoadingAccount(String), +// #[display(fmt = "Error saving changes in accounts storage: {}", _0)] +// ErrorSavingAccount(String), +// #[display(fmt = "Internal error: {}", _0)] +// Internal(String), +// } +// +// impl From for AccountRpcError { +// fn from(e: AccountStorageError) -> Self { +// match e { +// AccountStorageError::NoSuchAccount(account_id) => AccountRpcError::NoSuchAccount(account_id), +// AccountStorageError::NoEnabledAccount => AccountRpcError::NoEnabledAccount, +// AccountStorageError::AccountExistsAlready(account_id) => AccountRpcError::AccountExistsAlready(account_id), +// AccountStorageError::ErrorDeserializing(e) | AccountStorageError::ErrorLoading(e) => { +// AccountRpcError::ErrorLoadingAccount(e) +// }, +// AccountStorageError::ErrorSaving(e) | AccountStorageError::ErrorSerializing(e) => { +// AccountRpcError::ErrorSavingAccount(e) +// }, +// AccountStorageError::Internal(internal) => AccountRpcError::Internal(internal), +// } +// } +// } +// +// impl HttpStatusCode for AccountRpcError { +// fn status_code(&self) -> StatusCode { +// match self { +// AccountRpcError::NameTooLong { .. } +// | AccountRpcError::DescriptionTooLong { .. } +// | AccountRpcError::TickerTooLong { .. } +// | AccountRpcError::NoSuchAccount(_) +// | AccountRpcError::NoEnabledAccount +// | AccountRpcError::AccountExistsAlready(_) => StatusCode::BAD_REQUEST, +// AccountRpcError::ErrorLoadingAccount(_) +// | AccountRpcError::ErrorSavingAccount(_) +// | AccountRpcError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR, +// } +// } +// } +// +// #[derive(Deserialize)] +// pub struct NewAccount { +// account_id: Id, +// name: String, +// #[serde(default)] +// description: String, +// #[serde(default)] +// balance_usd: BigDecimal, +// } +// +// impl From> for AccountInfo +// where +// AccountId: From, +// { +// fn from(orig: NewAccount) -> Self { +// AccountInfo { +// account_id: AccountId::from(orig.account_id), +// name: orig.name, +// description: orig.description, +// balance_usd: orig.balance_usd, +// } +// } +// } +// +// #[derive(Deserialize)] +// pub struct EnableAccountRequest { +// #[serde(flatten)] +// policy: EnableAccountPolicy, +// } +// +// #[derive(Deserialize)] +// #[serde(tag = "policy")] +// #[serde(rename_all = "snake_case")] +// pub enum EnableAccountPolicy { +// Existing(EnabledAccountId), +// New(NewAccount), +// } +// +// #[derive(Deserialize)] +// pub struct AddAccountRequest { +// #[serde(flatten)] +// account: NewAccount, +// } +// +// #[derive(Deserialize)] +// pub struct DeleteAccountRequest { +// account_id: AccountId, +// } +// +// #[derive(Deserialize)] +// pub struct SetAccountNameRequest { +// account_id: AccountId, +// name: String, +// } +// +// #[derive(Deserialize)] +// pub struct SetAccountDescriptionRequest { +// account_id: AccountId, +// description: String, +// } +// +// #[derive(Deserialize)] +// pub struct CoinRequest { +// account_id: AccountId, +// tickers: Vec, +// } +// +// #[derive(Deserialize)] +// pub struct GetAccountsRequest; +// +// #[derive(Deserialize)] +// pub struct GetAccountCoinsRequest { +// account_id: AccountId, +// } +// +// #[derive(Serialize)] +// pub struct GetAccountCoinsResponse { +// account_id: AccountId, +// coins: BTreeSet, +// } +// +// #[derive(Deserialize)] +// pub struct GetEnabledAccountRequest; +// +// 
#[derive(Deserialize)] +// pub struct SetBalanceRequest { +// account_id: AccountId, +// balance_usd: BigDecimal, +// } +// +// /// Sets the given account as an enabled (current active account). +// /// The behaviour depends on [`EnableAccountRequest::policy`]: +// /// * [`EnableAccountPolicy::Known`] => +// /// 1) Checks whether the given account exists in the storage. +// /// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the given `AccountId`; +// /// 2) Sets the account as an enabled. +// /// * [`EnableAccountPolicy::New`] => +// /// 1) Tries to upload the given account info to the storage. +// /// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already; +// /// 2) Sets the account as an enabled. +// /// +// /// # Important +// /// +// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. +// pub async fn enable_account(ctx: MmArc, req: EnableAccountRequest) -> MmResult { +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// let account_id = match req.policy { +// EnableAccountPolicy::Existing(account_id) => account_id, +// EnableAccountPolicy::New(new_account) => { +// let account_id = new_account.account_id; +// account_ctx +// .storage() +// .await? +// .upload_account(AccountInfo::from(new_account)) +// .await?; +// account_id +// } +// }; +// account_ctx.storage().await?.enable_account(account_id).await?; +// Ok(SuccessResponse::new()) +// } +// +// /// Adds the given [`AddAccountRequest::account`] to the storage. +// /// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already. +// /// +// /// # Important +// /// +// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. +// pub async fn add_account(ctx: MmArc, req: AddAccountRequest) -> MmResult { +// validate_new_account(&req.account)?; +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// account_ctx +// .storage() +// .await? +// .upload_account(AccountInfo::from(req.account)) +// .await?; +// Ok(SuccessResponse::new()) +// } +// +// /// Deletes the given [`AddAccountRequest::account_id`] account from the storage. +// /// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the same `AccountId`. +// /// +// /// # Important +// /// +// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. +// pub async fn delete_account(ctx: MmArc, req: DeleteAccountRequest) -> MmResult { +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// account_ctx.storage().await?.delete_account(req.account_id).await?; +// Ok(SuccessResponse::new()) +// } +// +// /// Loads accounts from the storage and marks one account as enabled **only**. +// /// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. +// /// +// /// # Note +// /// +// /// The returned accounts are sorted by `AccountId`. +// pub async fn get_accounts( +// ctx: MmArc, +// _req: GetAccountsRequest, +// ) -> MmResult, AccountRpcError> { +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// let accounts = account_ctx +// .storage() +// .await? +// .load_accounts_with_enabled_flag() +// .await? +// // The given `BTreeMap` accounts are sorted by `AccountId`. 
+// .into_values() +// .collect(); +// Ok(accounts) +// } +// +// /// Loads activated coins of the given `account_id` from the storage. +// /// +// /// # Note +// /// +// /// The returned coins are sorted. +// pub async fn get_account_coins( +// ctx: MmArc, +// req: GetAccountCoinsRequest, +// ) -> MmResult { +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// let coins = account_ctx +// .storage() +// .await? +// .load_account_coins(req.account_id.clone()) +// .await?; +// Ok(GetAccountCoinsResponse { +// account_id: req.account_id, +// coins, +// }) +// } +// +// /// Loads an enabled account with activated coins from the storage. +// /// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. +// /// +// /// # Note +// /// +// /// The account coins are sorted. +// pub async fn get_enabled_account( +// ctx: MmArc, +// _req: GetEnabledAccountRequest, +// ) -> MmResult { +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// let account = account_ctx.storage().await?.load_enabled_account_with_coins().await?; +// Ok(account) +// } +// +// /// Sets the account name. +// pub async fn set_account_name(ctx: MmArc, req: SetAccountNameRequest) -> MmResult { +// validate_account_name(&req.name)?; +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// account_ctx.storage().await?.set_name(req.account_id, req.name).await?; +// Ok(SuccessResponse::new()) +// } +// +// /// Sets the account description. +// pub async fn set_account_description( +// ctx: MmArc, +// req: SetAccountDescriptionRequest, +// ) -> MmResult { +// validate_account_desc(&req.description)?; +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// account_ctx +// .storage() +// .await? +// .set_description(req.account_id, req.description) +// .await?; +// Ok(SuccessResponse::new()) +// } +// +// /// Sets the account USD balance. +// /// +// /// # Important +// /// +// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. +// pub async fn set_account_balance(ctx: MmArc, req: SetBalanceRequest) -> MmResult { +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// account_ctx +// .storage() +// .await? +// .set_balance(req.account_id, req.balance_usd) +// .await?; +// Ok(SuccessResponse::new()) +// } +// +// /// Activates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. +// /// +// /// # Important +// /// +// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. +// pub async fn activate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { +// validate_tickers(&req.tickers)?; +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// account_ctx +// .storage() +// .await? +// .activate_coins(req.account_id, req.tickers) +// .await?; +// Ok(SuccessResponse::new()) +// } +// +// /// Deactivates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. +// /// +// /// # Important +// /// +// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. +// pub async fn deactivate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { +// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; +// account_ctx +// .storage() +// .await? 
+// .deactivate_coins(req.account_id, req.tickers) +// .await?; +// Ok(SuccessResponse::new()) +// } +// +// fn validate_new_account(account: &NewAccount) -> MmResult<(), AccountRpcError> { +// validate_account_name(&account.name)?; +// validate_account_desc(&account.description) +// } +// +// fn validate_account_name(name: &str) -> MmResult<(), AccountRpcError> { +// if name.len() > MAX_ACCOUNT_NAME_LENGTH { +// return MmError::err(AccountRpcError::NameTooLong { +// max_len: MAX_ACCOUNT_NAME_LENGTH, +// }); +// } +// Ok(()) +// } +// +// fn validate_account_desc(description: &str) -> MmResult<(), AccountRpcError> { +// if description.len() > MAX_ACCOUNT_DESCRIPTION_LENGTH { +// return MmError::err(AccountRpcError::DescriptionTooLong { +// max_len: MAX_ACCOUNT_NAME_LENGTH, +// }); +// } +// Ok(()) +// } +// +// fn validate_tickers(tickers: &[String]) -> MmResult<(), AccountRpcError> { +// for ticker in tickers { +// if ticker.len() > MAX_TICKER_LENGTH { +// return MmError::err(AccountRpcError::TickerTooLong { +// max_len: MAX_TICKER_LENGTH, +// }); +// } +// } +// Ok(()) +// } diff --git a/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs b/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs index 6c288f5967..866eb8ee30 100644 --- a/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs +++ b/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs @@ -146,10 +146,10 @@ async fn dispatcher_v2(request: MmRpcRequest, ctx: MmArc) -> DispatcherResult DispatcherResult>> { - use mm2_gui_storage::rpc_commands as gui_storage_rpc; - - match gui_storage_method { - "activate_coins" => handle_mmrpc(ctx, request, gui_storage_rpc::activate_coins).await, - "add_account" => handle_mmrpc(ctx, request, gui_storage_rpc::add_account).await, - "deactivate_coins" => handle_mmrpc(ctx, request, gui_storage_rpc::deactivate_coins).await, - "delete_account" => handle_mmrpc(ctx, request, gui_storage_rpc::delete_account).await, - "enable_account" => handle_mmrpc(ctx, request, gui_storage_rpc::enable_account).await, - "get_accounts" => handle_mmrpc(ctx, request, gui_storage_rpc::get_accounts).await, - "get_account_coins" => handle_mmrpc(ctx, request, gui_storage_rpc::get_account_coins).await, - "get_enabled_account" => handle_mmrpc(ctx, request, gui_storage_rpc::get_enabled_account).await, - "set_account_balance" => handle_mmrpc(ctx, request, gui_storage_rpc::set_account_balance).await, - "set_account_description" => handle_mmrpc(ctx, request, gui_storage_rpc::set_account_description).await, - "set_account_name" => handle_mmrpc(ctx, request, gui_storage_rpc::set_account_name).await, - _ => MmError::err(DispatcherError::NoSuchMethod), - } -} +// +// /// `gui_storage` dispatcher. +// /// +// /// # Note +// /// +// /// `gui_storage_method` is a method name with the `gui_storage::` prefix removed. 
+// async fn gui_storage_dispatcher( +// request: MmRpcRequest, +// ctx: MmArc, +// gui_storage_method: &str, +// ) -> DispatcherResult>> { +// use mm2_gui_storage::rpc_commands as gui_storage_rpc; +// +// match gui_storage_method { +// "activate_coins" => handle_mmrpc(ctx, request, gui_storage_rpc::activate_coins).await, +// "add_account" => handle_mmrpc(ctx, request, gui_storage_rpc::add_account).await, +// "deactivate_coins" => handle_mmrpc(ctx, request, gui_storage_rpc::deactivate_coins).await, +// "delete_account" => handle_mmrpc(ctx, request, gui_storage_rpc::delete_account).await, +// "enable_account" => handle_mmrpc(ctx, request, gui_storage_rpc::enable_account).await, +// "get_accounts" => handle_mmrpc(ctx, request, gui_storage_rpc::get_accounts).await, +// "get_account_coins" => handle_mmrpc(ctx, request, gui_storage_rpc::get_account_coins).await, +// "get_enabled_account" => handle_mmrpc(ctx, request, gui_storage_rpc::get_enabled_account).await, +// "set_account_balance" => handle_mmrpc(ctx, request, gui_storage_rpc::set_account_balance).await, +// "set_account_description" => handle_mmrpc(ctx, request, gui_storage_rpc::set_account_description).await, +// "set_account_name" => handle_mmrpc(ctx, request, gui_storage_rpc::set_account_name).await, +// _ => MmError::err(DispatcherError::NoSuchMethod), +// } +// } /// `lightning` dispatcher. /// From 6a0d0b6af9828c608a69e691cd1bb3fdc7a3168d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 21 May 2024 13:53:02 +0100 Subject: [PATCH 127/186] comment out gui_storage_accounts_functionality unit tests --- .../tests/mm2_tests/mm2_tests_inner.rs | 540 +++++++++--------- 1 file changed, 270 insertions(+), 270 deletions(-) diff --git a/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs b/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs index 06abd46a92..ef80bc6542 100644 --- a/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs +++ b/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs @@ -5266,276 +5266,276 @@ fn test_no_login() { assert!(version.0.is_success(), "!version: {}", version.1); } -#[test] -#[cfg(not(target_arch = "wasm32"))] -fn test_gui_storage_accounts_functionality() { - let passphrase = "test_gui_storage passphrase"; - - let conf = Mm2TestConf::seednode(passphrase, &json!([])); - let mm = block_on(MarketMakerIt::start_async(conf.conf, conf.rpc_password, None)).unwrap(); - let (_bob_dump_log, _bob_dump_dashboard) = mm.mm_dump(); - log!("Log path: {}", mm.log_path.display()); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::enable_account", - "params": { - "policy": "new", - "account_id": { - "type": "iguana" - }, - "name": "My Iguana wallet", - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::enable_account: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::add_account", - "params": { - "account_id": { - "type": "hw", - "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" - }, - "description": "Any description", - "name": "My HW", - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); - - // Add `HD{1}` account that will be deleted later. 
- let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::add_account", - "params": { - "account_id": { - "type": "hd", - "account_idx": 1, - }, - "name": "An HD account" - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::delete_account", - "params": { - "account_id": { - "type": "hd", - "account_idx": 1, - } - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::delete_account: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::set_account_balance", - "params": { - "account_id": { - "type": "hw", - "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" - }, - "balance_usd": "123.567", - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::set_account_balance: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::set_account_name", - "params": { - "account_id": { - "type": "iguana" - }, - "name": "New Iguana account name", - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::set_account_name: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::set_account_description", - "params": { - "account_id": { - "type": "iguana" - }, - "description": "Another description", - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::set_account_description: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::get_accounts" - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::get_accounts: {}", resp.1); - - let actual: RpcV2Response> = json::from_str(&resp.1).unwrap(); - let expected = vec![ - gui_storage::AccountWithEnabledFlag { - account_id: gui_storage::AccountId::Iguana, - name: "New Iguana account name".to_string(), - description: "Another description".to_string(), - balance_usd: BigDecimal::from(0i32), - enabled: true, - }, - gui_storage::AccountWithEnabledFlag { - account_id: gui_storage::AccountId::HW { - device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), - }, - name: "My HW".to_string(), - description: "Any description".to_string(), - balance_usd: BigDecimal::from(123567i32) / BigDecimal::from(1000i32), - enabled: false, - }, - ]; - assert_eq!(actual.result, expected); -} - -#[test] -#[cfg(not(target_arch = "wasm32"))] -fn test_gui_storage_coins_functionality() { - let passphrase = "test_gui_storage passphrase"; - - let conf = Mm2TestConf::seednode(passphrase, &json!([])); - let mm = block_on(MarketMakerIt::start_async(conf.conf, conf.rpc_password, None)).unwrap(); - let (_bob_dump_log, _bob_dump_dashboard) = mm.mm_dump(); - log!("Log path: {}", mm.log_path.display()); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::enable_account", - "params": { - "policy": "new", - "account_id": { - "type": "iguana" - }, - "name": "My Iguana wallet", - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::enable_account: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::add_account", - "params": { - "account_id": { - "type": "hw", - "device_pubkey": 
"1549128bbfb33b997949b4105b6a6371c998e212" - }, - "description": "Any description", - "name": "My HW", - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::activate_coins", - "params": { - "account_id": { - "type": "iguana" - }, - "tickers": ["RICK", "MORTY", "KMD"], - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::activate_coins", - "params": { - "account_id": { - "type": "hw", - "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" - }, - "tickers": ["KMD", "MORTY", "BCH"], - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::deactivate_coins", - "params": { - "account_id": { - "type": "hw", - "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" - }, - "tickers": ["BTC", "MORTY"], - }, - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::deactivate_coins: {}", resp.1); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::get_enabled_account", - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); - let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); - let expected = gui_storage::AccountWithCoins { - account_id: gui_storage::AccountId::Iguana, - name: "My Iguana wallet".to_string(), - description: String::new(), - balance_usd: BigDecimal::from(0i32), - coins: vec!["RICK".to_string(), "MORTY".to_string(), "KMD".to_string()] - .into_iter() - .collect(), - }; - assert_eq!(actual.result, expected); - - let resp = block_on(mm.rpc(&json!({ - "userpass": mm.userpass, - "mmrpc": "2.0", - "method": "gui_storage::get_account_coins", - "params": { - "account_id": { - "type": "hw", - "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" - } - } - }))) - .unwrap(); - assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); - let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); - let expected = gui_storage::AccountCoins { - account_id: gui_storage::AccountId::HW { - device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), - }, - coins: vec!["KMD".to_string(), "BCH".to_string()].into_iter().collect(), - }; - assert_eq!(actual.result, expected); -} +// #[test] +// #[cfg(not(target_arch = "wasm32"))] +// fn test_gui_storage_accounts_functionality() { +// let passphrase = "test_gui_storage passphrase"; + +// let conf = Mm2TestConf::seednode(passphrase, &json!([])); +// let mm = block_on(MarketMakerIt::start_async(conf.conf, conf.rpc_password, None)).unwrap(); +// let (_bob_dump_log, _bob_dump_dashboard) = mm.mm_dump(); +// log!("Log path: {}", mm.log_path.display()); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::enable_account", +// "params": { +// "policy": "new", +// "account_id": { +// "type": "iguana" +// }, +// "name": "My Iguana wallet", +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::enable_account: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": 
"gui_storage::add_account", +// "params": { +// "account_id": { +// "type": "hw", +// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" +// }, +// "description": "Any description", +// "name": "My HW", +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); + +// // Add `HD{1}` account that will be deleted later. +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::add_account", +// "params": { +// "account_id": { +// "type": "hd", +// "account_idx": 1, +// }, +// "name": "An HD account" +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::delete_account", +// "params": { +// "account_id": { +// "type": "hd", +// "account_idx": 1, +// } +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::delete_account: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::set_account_balance", +// "params": { +// "account_id": { +// "type": "hw", +// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" +// }, +// "balance_usd": "123.567", +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::set_account_balance: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::set_account_name", +// "params": { +// "account_id": { +// "type": "iguana" +// }, +// "name": "New Iguana account name", +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::set_account_name: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::set_account_description", +// "params": { +// "account_id": { +// "type": "iguana" +// }, +// "description": "Another description", +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::set_account_description: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::get_accounts" +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::get_accounts: {}", resp.1); + +// let actual: RpcV2Response> = json::from_str(&resp.1).unwrap(); +// let expected = vec![ +// gui_storage::AccountWithEnabledFlag { +// account_id: gui_storage::AccountId::Iguana, +// name: "New Iguana account name".to_string(), +// description: "Another description".to_string(), +// balance_usd: BigDecimal::from(0i32), +// enabled: true, +// }, +// gui_storage::AccountWithEnabledFlag { +// account_id: gui_storage::AccountId::HW { +// device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), +// }, +// name: "My HW".to_string(), +// description: "Any description".to_string(), +// balance_usd: BigDecimal::from(123567i32) / BigDecimal::from(1000i32), +// enabled: false, +// }, +// ]; +// assert_eq!(actual.result, expected); +// } + +// #[test] +// #[cfg(not(target_arch = "wasm32"))] +// fn test_gui_storage_coins_functionality() { +// let passphrase = "test_gui_storage passphrase"; + +// let conf = Mm2TestConf::seednode(passphrase, &json!([])); +// let mm = block_on(MarketMakerIt::start_async(conf.conf, conf.rpc_password, None)).unwrap(); +// let (_bob_dump_log, _bob_dump_dashboard) = mm.mm_dump(); +// 
log!("Log path: {}", mm.log_path.display()); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::enable_account", +// "params": { +// "policy": "new", +// "account_id": { +// "type": "iguana" +// }, +// "name": "My Iguana wallet", +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::enable_account: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::add_account", +// "params": { +// "account_id": { +// "type": "hw", +// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" +// }, +// "description": "Any description", +// "name": "My HW", +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::activate_coins", +// "params": { +// "account_id": { +// "type": "iguana" +// }, +// "tickers": ["RICK", "MORTY", "KMD"], +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::activate_coins", +// "params": { +// "account_id": { +// "type": "hw", +// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" +// }, +// "tickers": ["KMD", "MORTY", "BCH"], +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::deactivate_coins", +// "params": { +// "account_id": { +// "type": "hw", +// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" +// }, +// "tickers": ["BTC", "MORTY"], +// }, +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::deactivate_coins: {}", resp.1); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::get_enabled_account", +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); +// let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); +// let expected = gui_storage::AccountWithCoins { +// account_id: gui_storage::AccountId::Iguana, +// name: "My Iguana wallet".to_string(), +// description: String::new(), +// balance_usd: BigDecimal::from(0i32), +// coins: vec!["RICK".to_string(), "MORTY".to_string(), "KMD".to_string()] +// .into_iter() +// .collect(), +// }; +// assert_eq!(actual.result, expected); + +// let resp = block_on(mm.rpc(&json!({ +// "userpass": mm.userpass, +// "mmrpc": "2.0", +// "method": "gui_storage::get_account_coins", +// "params": { +// "account_id": { +// "type": "hw", +// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" +// } +// } +// }))) +// .unwrap(); +// assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); +// let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); +// let expected = gui_storage::AccountCoins { +// account_id: gui_storage::AccountId::HW { +// device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), +// }, +// coins: vec!["KMD".to_string(), "BCH".to_string()].into_iter().collect(), +// }; +// assert_eq!(actual.result, expected); +// } #[test] #[cfg(not(target_arch = "wasm32"))] From fbfe747aa84df04fc5db39a2aaa6f79ba9b29bb1 Mon Sep 17 00:00:00 2001 From: 
Samuel Onoja Date: Wed, 22 May 2024 08:05:59 +0100 Subject: [PATCH 128/186] fix review notes --- mm2src/coins/hd_wallet/storage/wasm_storage.rs | 3 ++- mm2src/coins/lp_coins.rs | 8 +------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/mm2src/coins/hd_wallet/storage/wasm_storage.rs b/mm2src/coins/hd_wallet/storage/wasm_storage.rs index 204e29bec2..f8d75c044b 100644 --- a/mm2src/coins/hd_wallet/storage/wasm_storage.rs +++ b/mm2src/coins/hd_wallet/storage/wasm_storage.rs @@ -169,7 +169,7 @@ impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { let db = SharedDb::downgrade(&coins_ctx.hd_wallet_db); Ok(HDWalletIndexedDbStorage { db, - db_id: db_id.map(|e| e.to_string()), + db_id: db_id.map(String::from), }) } @@ -320,6 +320,7 @@ impl HDWalletIndexedDbStorage { } /// This function is used in `hd_wallet_storage::tests`. +#[cfg(any(test, target_arch = "wasm32"))] pub(super) async fn get_all_storage_items(ctx: &MmArc) -> Vec { let coins_ctx = CoinsContext::from_ctx(ctx).unwrap(); let db = coins_ctx.hd_wallet_db.get_or_initialize(None).await.unwrap(); diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 6f12ff563a..5a763436e6 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4589,14 +4589,8 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result Date: Mon, 27 May 2024 09:31:22 +0100 Subject: [PATCH 129/186] fix error handling --- mm2src/coins/lightning/ln_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 96666daf2d..b3eb308b80 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -82,7 +82,7 @@ pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult Date: Mon, 27 May 2024 11:09:27 +0100 Subject: [PATCH 130/186] fix hd_wallet todos --- mm2src/coins/hd_wallet/storage/mod.rs | 62 +++++++++++++++---- .../coins/hd_wallet/storage/sqlite_storage.rs | 4 +- .../coins/hd_wallet/storage/wasm_storage.rs | 4 +- .../utxo/utxo_builder/utxo_coin_builder.rs | 4 +- mm2src/mm2_core/src/sql_connection_pool.rs | 2 +- mm2src/mm2_test_helpers/src/for_tests.rs | 14 +++++ 6 files changed, 71 insertions(+), 19 deletions(-) diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index 5fa96b9a42..bc514e55f3 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -220,13 +220,13 @@ impl Default for HDWalletCoinStorage { } impl HDWalletCoinStorage { - // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init(ctx: &MmArc, coin: String) -> HDWalletStorageResult { let crypto_ctx = CryptoCtx::from_ctx(ctx)?; let hd_wallet_rmd160 = crypto_ctx .hw_wallet_rmd160() .or_mm_err(|| HDWalletStorageError::HDWalletUnavailable)?; - let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); + let db_id = hex::encode(hd_wallet_rmd160.as_slice()); + let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -234,13 +234,14 @@ impl HDWalletCoinStorage { }) } - // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? 
or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init_with_rmd160( ctx: &MmArc, coin: String, hd_wallet_rmd160: H160, ) -> HDWalletStorageResult { - let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); + let db_id = hex::encode(hd_wallet_rmd160.as_slice()); + let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); + Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -299,7 +300,7 @@ fn display_rmd160(rmd160: &H160) -> String { hex::encode(rmd160.deref()) } mod tests { use super::*; use itertools::Itertools; - use mm2_test_helpers::for_tests::mm_ctx_with_custom_db; + use mm2_test_helpers::for_tests::{add_custom_db, mm_ctx_with_custom_db}; use primitives::hash::H160; cfg_wasm32! { @@ -342,7 +343,12 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device0_rmd160 = H160::from("0000000000000000000000000000000000000020"); + let device0_rmd160_db_id = hex::encode(device0_rmd160.as_slice()); + add_custom_db(&ctx, device0_rmd160_db_id.clone()); + let device1_rmd160 = H160::from("0000000000000000000000000000000000000030"); + let device1_rmd160_db_id = hex::encode(device1_rmd160.as_slice()); + add_custom_db(&ctx, device1_rmd160_db_id.clone()); let rick_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await @@ -371,9 +377,9 @@ mod tests { .await .expect("!HDWalletCoinStorage::upload_new_account: MORTY device=0 account=0"); - // All accounts must be in the only one database. + // All accounts must be in the only one database for device 0. // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values. - let all_accounts: Vec<_> = get_all_storage_items(&ctx) + let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device0_rmd160_db_id)) .await .into_iter() .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) @@ -381,9 +387,16 @@ mod tests { assert_eq!(all_accounts, vec![ rick_device0_account0.clone(), rick_device0_account1.clone(), - rick_device1_account0.clone(), morty_device0_account0.clone() ]); + // All accounts must be in the only one database for device 1. + // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values. 
+ let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device1_rmd160_db_id)) + .await + .into_iter() + .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) + .collect(); + assert_eq!(all_accounts, vec![rick_device1_account0.clone(),]); let mut actual = rick_device0_db .load_all_accounts() @@ -433,8 +446,16 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device0_rmd160 = H160::from("0000000000000000000000000000000000000010"); + let device0_rmd160_db_id = hex::encode(device0_rmd160.as_slice()); + add_custom_db(&ctx, device0_rmd160_db_id.clone()); + let device1_rmd160 = H160::from("0000000000000000000000000000000000000020"); + let device1_rmd160_db_id = hex::encode(device1_rmd160.as_slice()); + add_custom_db(&ctx, device1_rmd160_db_id.clone()); + let device2_rmd160 = H160::from("0000000000000000000000000000000000000030"); + let device2_rmd160_db_id = hex::encode(device2_rmd160.as_slice()); + add_custom_db(&ctx, device2_rmd160_db_id.clone()); let wallet0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await @@ -468,14 +489,31 @@ mod tests { .await .expect("HDWalletCoinStorage::clear_accounts: RICK wallet=0"); - // All accounts must be in the only one database. + // device0 database should return no account. + let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device0_rmd160_db_id)) + .await + .into_iter() + .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) + .collect(); + assert_eq!(all_accounts, vec![]); + + // All accounts must be in only device1 database . + // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values. + let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device1_rmd160_db_id)) + .await + .into_iter() + .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) + .collect(); + assert_eq!(all_accounts, vec![wallet1_account0]); + + // All accounts must be in only device2 database . // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values. - let all_accounts: Vec<_> = get_all_storage_items(&ctx) + let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device2_rmd160_db_id)) .await .into_iter() .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) .collect(); - assert_eq!(all_accounts, vec![wallet1_account0, wallet2_account0]); + assert_eq!(all_accounts, vec![wallet2_account0]); } async fn test_update_account_impl() { @@ -494,6 +532,8 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device_rmd160 = H160::from("0000000000000000000000000000000000000010"); + let device_rmd160_db_id = hex::encode(device_rmd160.as_slice()); + add_custom_db(&ctx, device_rmd160_db_id.clone()); let db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device_rmd160) .await diff --git a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs index 911b0f9518..e2afa49cae 100644 --- a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs +++ b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs @@ -275,11 +275,11 @@ enum UpdatingProperty { /// This function is used in `hd_wallet_storage::tests`. 
#[cfg(test)] -pub(crate) async fn get_all_storage_items(ctx: &MmArc) -> Vec { +pub(crate) async fn get_all_storage_items(ctx: &MmArc, db_id: Option<&str>) -> Vec { const SELECT_ALL_ACCOUNTS: &str = "SELECT account_id, account_xpub, external_addresses_number, internal_addresses_number FROM hd_account"; - let conn = ctx.shared_sqlite_conn_opt(None).unwrap(); + let conn = ctx.shared_sqlite_conn_opt(db_id).unwrap(); let conn = conn.lock().unwrap(); let mut statement = conn.prepare(SELECT_ALL_ACCOUNTS).unwrap(); statement diff --git a/mm2src/coins/hd_wallet/storage/wasm_storage.rs b/mm2src/coins/hd_wallet/storage/wasm_storage.rs index f8d75c044b..c8638f37c8 100644 --- a/mm2src/coins/hd_wallet/storage/wasm_storage.rs +++ b/mm2src/coins/hd_wallet/storage/wasm_storage.rs @@ -321,9 +321,9 @@ impl HDWalletIndexedDbStorage { /// This function is used in `hd_wallet_storage::tests`. #[cfg(any(test, target_arch = "wasm32"))] -pub(super) async fn get_all_storage_items(ctx: &MmArc) -> Vec { +pub(super) async fn get_all_storage_items(ctx: &MmArc, db_id: Option<&str>) -> Vec { let coins_ctx = CoinsContext::from_ctx(ctx).unwrap(); - let db = coins_ctx.hd_wallet_db.get_or_initialize(None).await.unwrap(); + let db = coins_ctx.hd_wallet_db.get_or_initialize(db_id).await.unwrap(); let transaction = db.inner.transaction().await.unwrap(); let table = transaction.table::().await.unwrap(); table diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 6206919df7..5c00eba000 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -827,9 +827,7 @@ pub trait UtxoCoinBuilderCommonOps { return Ok(None); } - let block_to_sync_from = current_block_height - blocks_to_sync; - - Ok(Some(block_to_sync_from)) + Ok(Some(current_block_height - blocks_to_sync)) } } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 7585813ce2..1842b8310a 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -24,7 +24,7 @@ enum DbIdConnKind { /// A pool for managing SQLite connections, where each connection is keyed by a unique string identifier. 
 #[derive(Clone)]
 pub struct SqliteConnPool {
-    connections: Arc<RwLock<HashMap<String, Arc<Mutex<Connection>>>>>,
+    pub connections: Arc<RwLock<HashMap<String, Arc<Mutex<Connection>>>>>,
     // default db_id
     rmd160_hex: String,
     // default shared_db_id
diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs
index 1fa45039a4..a7735f1407 100644
--- a/mm2src/mm2_test_helpers/src/for_tests.rs
+++ b/mm2src/mm2_test_helpers/src/for_tests.rs
@@ -1102,6 +1102,20 @@ pub fn mm_ctx_with_custom_db_with_conf(conf: Option) -> MmArc {
     ctx
 }
 
+#[cfg(target_arch = "wasm32")]
+pub fn add_custom_db(_ctx: &MmArc, _db_id: String) {}
+
+#[cfg(not(target_arch = "wasm32"))]
+pub fn add_custom_db(ctx: &MmArc, db_id: String) {
+    use db_common::sqlite::rusqlite::Connection;
+    use std::sync::Arc;
+
+    let connections = ctx.sqlite_conn_pool.as_option().expect("db pool not initialized!");
+    let mut connections = connections.connections.write().unwrap();
+    connections.insert(db_id, Arc::new(Mutex::new(Connection::open_in_memory().unwrap())));
+    drop(connections);
+}
+
 #[cfg(not(target_arch = "wasm32"))]
 pub async fn mm_ctx_with_custom_async_db() -> MmArc {
     let ctx = MmCtxBuilder::new().into_mm_arc();

From df3cf1ecb6fd5d11d5b6a3a2b796e71b81916085 Mon Sep 17 00:00:00 2001
From: Samuel Onoja
Date: Mon, 27 May 2024 11:13:34 +0100
Subject: [PATCH 131/186] remove unneeded comments

---
 mm2src/coins/hd_wallet/storage/mod.rs | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs
index bc514e55f3..e7aa7bffbc 100644
--- a/mm2src/coins/hd_wallet/storage/mod.rs
+++ b/mm2src/coins/hd_wallet/storage/mod.rs
@@ -377,8 +377,7 @@ mod tests {
             .await
             .expect("!HDWalletCoinStorage::upload_new_account: MORTY device=0 account=0");
 
-        // All accounts must be in the only one database for device 0.
-        // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values.
+        // All accounts must be in only device0_rmd160_db_id database.
         let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device0_rmd160_db_id))
             .await
             .into_iter()
@@ -389,8 +388,7 @@ mod tests {
             rick_device0_account1.clone(),
             morty_device0_account0.clone()
         ]);
-        // All accounts must be in the only one database for device 1.
-        // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values.
+        // All accounts must be in only device1_rmd160_db_id database.
         let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device1_rmd160_db_id))
             .await
             .into_iter()
@@ -498,7 +496,6 @@ mod tests {
         assert_eq!(all_accounts, vec![]);
 
         // All accounts must be in only device1 database .
-        // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values.
         let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device1_rmd160_db_id))
             .await
             .into_iter()
@@ -507,7 +504,6 @@ mod tests {
         assert_eq!(all_accounts, vec![wallet1_account0]);
 
        // All accounts must be in only device2 database .
-        // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values.
         let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device2_rmd160_db_id))
             .await
             .into_iter()

From c028061975d586cbd839c68a26a4a64fcaade169 Mon Sep 17 00:00:00 2001
From: Samuel Onoja
Date: Tue, 28 May 2024 20:04:56 +0100
Subject: [PATCH 132/186] mark use_watcher as true

---
 mm2src/mm2_core/src/mm_ctx.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs
index 1d70976815..07294b6865 100644
--- a/mm2src/mm2_core/src/mm_ctx.rs
+++ b/mm2src/mm2_core/src/mm_ctx.rs
@@ -328,7 +328,7 @@ impl MmCtx {
 
     pub fn is_watcher(&self) -> bool { self.conf["is_watcher"].as_bool().unwrap_or_default() }
 
-    pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(false) }
+    pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(true) }
 
     pub fn netid(&self) -> u16 {
         let netid = self.conf["netid"].as_u64().unwrap_or(0);

From 309a5acc2d761ad04504d872cf0a073294a44cdf Mon Sep 17 00:00:00 2001
From: Samuel Onoja
Date: Wed, 29 May 2024 19:17:13 +0100
Subject: [PATCH 133/186] fix sqlite deadlock and improve api design

---
 .../src/tendermint_with_assets_activation.rs  |  7 +-
 mm2src/common/common.rs                       |  2 +-
 mm2src/mm2_core/src/mm_ctx.rs                 | 22 +++---
 mm2src/mm2_core/src/sql_connection_pool.rs    | 49 ++++++++++++-
 mm2src/mm2_main/src/database.rs               | 53 +++++++-------
 mm2src/mm2_main/src/database/my_orders.rs     | 45 ++++++------
 mm2src/mm2_main/src/database/my_swaps.rs      | 31 +++-----
 mm2src/mm2_main/src/database/stats_nodes.rs   | 42 ++++++-----
 mm2src/mm2_main/src/lp_ordermatch.rs          | 10 +--
 .../src/lp_ordermatch/my_orders_storage.rs    | 16 +++--
 mm2src/mm2_main/src/lp_swap.rs                | 10 +--
 mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs  | 71 ++++++++++---------
 .../mm2_main/src/lp_swap/my_swaps_storage.rs  | 30 ++++----
 mm2src/mm2_main/src/lp_swap/saved_swap.rs     |  2 +-
 mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 39 +++++-----
 mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs   | 38 +++++-----
 mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs  | 71 ++++++++++---------
 17 files changed, 301 insertions(+), 237 deletions(-)

diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs
index 915112ba5a..a9b91db97f 100644
--- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs
+++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs
@@ -408,7 +408,12 @@ async fn run_db_migraiton_for_new_tendermint_pubkey(
         })?
         .to_string();
 
-    let db_migration_sender = ctx.db_migration_watcher().await.get_sender().await;
+    let db_migration_sender = ctx
+        .db_migration_watcher
+        .as_option()
+        .expect("Db migration watcher isn't initialized yet!")
+        .get_sender()
+        .await;
     let mut db_migration_sender = db_migration_sender.lock().await;
     db_migration_sender
         .send(DbIds {
diff --git a/mm2src/common/common.rs b/mm2src/common/common.rs
index 5e393d6d98..5c4d16ef1e 100644
--- a/mm2src/common/common.rs
+++ b/mm2src/common/common.rs
@@ -880,7 +880,7 @@ pub const fn sixty_f64() -> f64 { 60. }
 
 pub fn one() -> NonZeroUsize { NonZeroUsize::new(1).unwrap() }
 
-#[derive(Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize)]
 pub struct PagingOptions {
     #[serde(default = "ten")]
     pub limit: usize,
diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs
index 07294b6865..d746815a58 100644
--- a/mm2src/mm2_core/src/mm_ctx.rs
+++ b/mm2src/mm2_core/src/mm_ctx.rs
@@ -375,28 +375,22 @@ impl MmCtx {
             .map(|pool| pool.sqlite_conn(db_id))
     }
 
-    /// Obtains a connection from the pool for the specified database ID, panicking if the pool is not initialized.
     #[cfg(not(target_arch = "wasm32"))]
-    pub fn sqlite_connection(&self, db_id: Option<&str>) -> Arc<Mutex<Connection>> {
-        let pool = self
-            .sqlite_conn_pool
+    pub fn run_sql_query<F, R>(&self, db_id: Option<&str>, f: F) -> R
+    where
+        F: FnOnce(MutexGuard<Connection>) -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        self.sqlite_conn_pool
             .or(&|| panic!("sqlite_connection is not initialized"))
-            .clone();
-        pool.sqlite_conn(db_id)
+            .clone()
+            .run_sql_query(db_id, f)
     }
 
     #[cfg(not(target_arch = "wasm32"))]
     pub async fn init_db_migration_watcher(&self) -> Result<Arc<DbMigrationWatcher>, String> {
         DbMigrationWatcher::init(self).await
     }
-
-    #[cfg(not(target_arch = "wasm32"))]
-    pub async fn db_migration_watcher(&self) -> Arc<DbMigrationWatcher> {
-        self.db_migration_watcher
-            .as_option()
-            .expect("Db migration watcher isn't intialized yet!")
-            .clone()
-    }
 }
 
 impl Default for MmCtx {
diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs
index 1842b8310a..1d1293303b 100644
--- a/mm2src/mm2_core/src/sql_connection_pool.rs
+++ b/mm2src/mm2_core/src/sql_connection_pool.rs
@@ -9,7 +9,7 @@ use gstuff::try_s;
 use primitives::hash::H160;
 use std::collections::{HashMap, HashSet};
 use std::path::PathBuf;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, Mutex, MutexGuard, RwLock};
 
 pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db";
 const SYNC_SQLITE_DB_ID: &str = "MM2.db";
@@ -147,6 +147,53 @@ impl SqliteConnPool {
         connection
     }
 
+    /// Retrieves a single-user connection from the pool.
+    pub fn run_sql_query<F, R>(&self, db_id: Option<&str>, f: F) -> R
+    where
+        F: FnOnce(MutexGuard<Connection>) -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        self.run_sql_query_impl(db_id, DbIdConnKind::Single, f)
+    }
+
+    /// Retrieves a shared connection from the pool.
+    pub fn run_sql_query_shared<F, R>(&self, db_id: Option<&str>, f: F) -> R
+    where
+        F: FnOnce(MutexGuard<Connection>) -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        self.run_sql_query_impl(db_id, DbIdConnKind::Shared, f)
+    }
+
+    /// Internal implementation to retrieve or create a connection.
+    fn run_sql_query_impl<F, R>(&self, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind, f: F) -> R
+    where
+        F: FnOnce(MutexGuard<Connection>) -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        let db_id_default = match db_id_conn_kind {
+            DbIdConnKind::Shared => hex::encode(self.shared_db_id.as_slice()),
+            DbIdConnKind::Single => self.rmd160_hex.clone(),
+        };
+        let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default);
+
+        let connections = self.connections.read().unwrap();
+        if let Some(connection) = connections.get(&db_id) {
+            let conn = connection.lock().unwrap();
+            return f(conn);
+        }
+
+        let mut connections = self.connections.write().unwrap();
+        let sqlite_file_path = match db_id_conn_kind {
+            DbIdConnKind::Shared => self.db_dir(&db_id).join(SQLITE_SHARED_DB_ID),
+            DbIdConnKind::Single => self.db_dir(&db_id).join(SYNC_SQLITE_DB_ID),
+        };
+        let connection = Self::open_connection(sqlite_file_path);
+        connections.insert(db_id, Arc::clone(&connection));
+        let conn = connection.lock().unwrap();
+        f(conn)
+    }
+
     /// Opens a database connection based on the database ID and connection kind.
     fn open_connection(sqlite_file_path: PathBuf) -> Arc<Mutex<Connection>> {
         log_sqlite_file_open_attempt(&sqlite_file_path);
diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs
index e61dba342f..1ce3d73197 100644
--- a/mm2src/mm2_main/src/database.rs
+++ b/mm2src/mm2_main/src/database.rs
@@ -18,9 +18,9 @@ use stats_swaps::create_and_fill_stats_swaps_from_json_statements;
 const SELECT_MIGRATION: &str = "SELECT * FROM migration ORDER BY current_migration DESC LIMIT 1;";
 
 fn get_current_migration(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<i64> {
-    let conn = ctx.sqlite_connection(db_id);
-    let conn = conn.lock().unwrap();
-    conn.query_row(SELECT_MIGRATION, [], |row| row.get(0))
+    ctx.run_sql_query(db_id, move |conn| {
+        conn.query_row(SELECT_MIGRATION, [], |row| row.get(0))
+    })
 }
 
 pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> {
@@ -57,28 +57,28 @@ pub async fn init_and_migrate_sql_db(ctx: &MmArc, db_id: Option<&str>) -> SqlRes
 }
 
 fn init_db(ctx: &MmArc, db_id: Option<&str>) -> SqlResult<()> {
-    let conn = ctx.sqlite_connection(db_id);
-    let conn = conn.lock().unwrap();
-    run_optimization_pragmas(&conn)?;
-    let init_batch = concat!(
-        "BEGIN;
-        CREATE TABLE IF NOT EXISTS migration (current_migration INTEGER NOT_NULL UNIQUE);
-        INSERT INTO migration (current_migration) VALUES (1);",
-        CREATE_MY_SWAPS_TABLE!(),
-        "COMMIT;"
-    );
-    conn.execute_batch(init_batch)
+    ctx.run_sql_query(db_id, move |conn| {
+        run_optimization_pragmas(&conn)?;
+        let init_batch = concat!(
+            "BEGIN;
+            CREATE TABLE IF NOT EXISTS migration (current_migration INTEGER NOT_NULL UNIQUE);
+            INSERT INTO migration (current_migration) VALUES (1);",
+            CREATE_MY_SWAPS_TABLE!(),
+            "COMMIT;"
+        );
+        conn.execute_batch(init_batch)
+    })
 }
 
 fn clean_db(ctx: &MmArc, db_id: Option<&str>) {
-    let conn = ctx.sqlite_connection(db_id);
-    let conn = conn.lock().unwrap();
-    if let Err(e) = conn.execute_batch(
-        "DROP TABLE migration;
-        DROP TABLE my_swaps;",
-    ) {
-        error!("Error {} on SQLite database cleanup", e);
-    }
+    ctx.run_sql_query(db_id, move |conn| {
+        if let Err(e) = conn.execute_batch(
+            "DROP TABLE migration;
+            DROP TABLE my_swaps;",
+        ) {
+            error!("Error {} on SQLite database cleanup", e);
+        }
+    })
 }
 
 async fn migration_1(ctx: &MmArc) -> Vec<(&'static str, Vec<String>)> {
@@ -149,16 +149,18 @@ async fn statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option
     }
 }
 
-pub async fn migrate_sqlite_database(ctx:
&MmArc, current_migration: i64, db_id: Option<&str>) -> SqlResult<()> { - let mut current_migration = current_migration; +pub async fn migrate_sqlite_database(ctx: &MmArc, mut current_migration: i64, db_id: Option<&str>) -> SqlResult<()> { info!( "{}:db migrate_sqlite_database current migration {current_migration}", db_id.unwrap_or("default") ); + while let Some(statements_with_params) = statements_for_migration(ctx, current_migration).await { // `statements_for_migration` locks the [`MmCtx::sqlite_connection`] mutex, // so we can't create a transaction outside of this loop. - let conn = ctx.sqlite_connection(db_id); + let conn = ctx + .sqlite_conn_opt(db_id) + .expect("Connection should be initialized before we get here"); let conn = conn.lock().unwrap(); let transaction = conn.unchecked_transaction()?; for (statement, params) in statements_with_params { @@ -172,6 +174,7 @@ pub async fn migrate_sqlite_database(ctx: &MmArc, current_migration: i64, db_id: ])?; transaction.commit()?; } + info!( "{}:db migrate_sqlite_database complete migrated to {current_migration}", db_id.unwrap_or("default") diff --git a/mm2src/mm2_main/src/database/my_orders.rs b/mm2src/mm2_main/src/database/my_orders.rs index 0082174c11..f2441a1eb5 100644 --- a/mm2src/mm2_main/src/database/my_orders.rs +++ b/mm2src/mm2_main/src/database/my_orders.rs @@ -56,10 +56,10 @@ pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlRes 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection(order.db_id.as_deref()); - let conn = conn.lock().unwrap(); - conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) - .map(|_| ()) + ctx.run_sql_query(order.db_id().as_deref(), move |conn| { + conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) + .map(|_| ()) + }) } pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder) -> SqlResult<()> { @@ -82,10 +82,11 @@ pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder) -> SqlRes 0.to_string(), "Created".to_string(), ]; - let conn = ctx.sqlite_connection(order.db_id.as_deref()); - let conn = conn.lock().unwrap(); - conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) - .map(|_| ()) + + ctx.run_sql_query(order.db_id().as_deref(), move |conn| { + conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) + .map(|_| ()) + }) } pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlResult<()> { @@ -97,10 +98,10 @@ pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlRes order.updated_at.unwrap_or(0).to_string(), "Updated".to_string(), ]; - let conn = ctx.sqlite_connection(order.db_id.as_deref()); - let conn = conn.lock().unwrap(); - conn.execute(UPDATE_MY_ORDER, params_from_iter(params.iter())) - .map(|_| ()) + ctx.run_sql_query(order.db_id().as_deref(), move |conn| { + conn.execute(UPDATE_MY_ORDER, params_from_iter(params.iter())) + .map(|_| ()) + }) } pub fn update_was_taker(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> SqlResult<()> { @@ -111,19 +112,21 @@ pub fn update_was_taker(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) -> SqlResu now_ms().to_string(), 1.to_string(), ]; - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); - conn.execute(UPDATE_WAS_TAKER, params_from_iter(params.iter())) - .map(|_| ()) + + ctx.run_sql_query(db_id, move |conn| { + conn.execute(UPDATE_WAS_TAKER, params_from_iter(params.iter())) + .map(|_| ()) + }) } pub fn update_order_status(ctx: &MmArc, uuid: Uuid, status: String, db_id: Option<&str>) 
-> SqlResult<()> { debug!("Updating order {} in the SQLite database", uuid); let params = vec![uuid.to_string(), now_ms().to_string(), status]; - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); - conn.execute(UPDATE_ORDER_STATUS, params_from_iter(params.iter())) - .map(|_| ()) + + ctx.run_sql_query(db_id, move |conn| { + conn.execute(UPDATE_ORDER_STATUS, params_from_iter(params.iter())) + .map(|_| ()) + }) } /// Adds where clauses determined by MyOrdersFilter @@ -210,7 +213,7 @@ impl From for SelectRecentOrdersUuidsErr { pub fn select_orders_by_filter( conn: &Connection, filter: &MyOrdersFilter, - paging_options: Option<&PagingOptions>, + paging_options: Option, ) -> SqlResult { let mut query_builder = SqlBuilder::select_from(MY_ORDERS_TABLE); let mut params = vec![]; diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 6a27bb0cf3..7575160625 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -64,17 +64,14 @@ const INSERT_MY_SWAP: &str = "INSERT INTO my_swaps (my_coin, other_coin, uuid, started_at, swap_type) VALUES (?1, ?2, ?3, ?4, ?5)"; pub fn insert_new_swap( - ctx: &MmArc, + conn: &Connection, my_coin: &str, other_coin: &str, uuid: &str, started_at: &str, swap_type: u8, - db_id: Option<&str>, ) -> SqlResult<()> { debug!("Inserting new swap {} to the SQLite database", uuid); - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); let params = [my_coin, other_coin, uuid, started_at, &swap_type.to_string()]; conn.execute(INSERT_MY_SWAP, params).map(|_| ()) } @@ -123,9 +120,7 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( :other_p2p_pub );"#; -pub fn insert_new_swap_v2(ctx: &MmArc, params: &[(&str, &dyn ToSql)], db_id: Option<&str>) -> SqlResult<()> { - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); +pub fn insert_new_swap_v2(conn: &Connection, params: &[(&str, &dyn ToSql)]) -> SqlResult<()> { conn.execute(INSERT_MY_SWAP_V2, params).map(|_| ()) } @@ -201,8 +196,8 @@ fn apply_my_swaps_filter(builder: &mut SqlBuilder, params: &mut Vec<(&str, Strin pub fn select_uuids_by_my_swaps_filter( conn: &Connection, filter: &MySwapsFilter, - paging_options: Option<&PagingOptions>, - db_id: &str, + paging_options: Option, + db_id: String, ) -> SqlResult { let mut query_builder = SqlBuilder::select_from(MY_SWAPS_TABLE); let mut params = vec![]; @@ -220,7 +215,7 @@ pub fn select_uuids_by_my_swaps_filter( let total_count = total_count.try_into().expect("COUNT should always be >= 0"); if total_count == 0 { return Ok(MyRecentSwapsUuids { - pubkey: db_id.to_string(), + pubkey: db_id, uuids_and_types: vec![], skipped: 0, total_count: 0, @@ -262,19 +257,19 @@ pub fn select_uuids_by_my_swaps_filter( uuids_and_types, total_count, skipped, - pubkey: db_id.to_string(), + pubkey: db_id, }) } /// Returns whether a swap with specified uuid exists in DB -pub fn does_swap_exist(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> SqlResult { +pub fn does_swap_exist(conn: &Connection, uuid: &str) -> SqlResult { const SELECT_SWAP_ID_BY_UUID: &str = "SELECT id FROM my_swaps WHERE uuid = :uuid;"; let res: Option = query_single_row(conn, SELECT_SWAP_ID_BY_UUID, &[(":uuid", uuid)], |row| row.get(0))?; Ok(res.is_some()) } /// Queries swap events by uuid -pub fn get_swap_events(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> SqlResult { +pub fn get_swap_events(conn: &Connection, uuid: &str) -> SqlResult { const 
SELECT_SWAP_EVENTS_BY_UUID: &str = "SELECT events_json FROM my_swaps WHERE uuid = :uuid;"; let mut stmt = conn.prepare(SELECT_SWAP_EVENTS_BY_UUID)?; let swap_type = stmt.query_row(&[(":uuid", uuid)], |row| row.get(0))?; @@ -282,7 +277,7 @@ pub fn get_swap_events(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> S } /// Updates swap events by uuid -pub fn update_swap_events(conn: &Connection, uuid: &str, events_json: &str, _db_id: Option<&str>) -> SqlResult<()> { +pub fn update_swap_events(conn: &Connection, uuid: &str, events_json: &str) -> SqlResult<()> { const UPDATE_SWAP_EVENTS_BY_UUID: &str = "UPDATE my_swaps SET events_json = :events_json WHERE uuid = :uuid;"; let mut stmt = conn.prepare(UPDATE_SWAP_EVENTS_BY_UUID)?; stmt.execute(&[(":uuid", uuid), (":events_json", events_json)]) @@ -291,16 +286,12 @@ pub fn update_swap_events(conn: &Connection, uuid: &str, events_json: &str, _db_ const UPDATE_SWAP_IS_FINISHED_BY_UUID: &str = "UPDATE my_swaps SET is_finished = 1 WHERE uuid = :uuid;"; -pub fn set_swap_is_finished(conn: &Connection, uuid: &str, _db_id: Option<&str>) -> SqlResult<()> { +pub fn set_swap_is_finished(conn: &Connection, uuid: &str) -> SqlResult<()> { let mut stmt = conn.prepare(UPDATE_SWAP_IS_FINISHED_BY_UUID)?; stmt.execute(&[(":uuid", uuid)]).map(|_| ()) } -pub fn select_unfinished_swaps_uuids( - conn: &Connection, - swap_type: u8, - _db_id: Option<&str>, -) -> SqlResult, SelectSwapsUuidsErr> { +pub fn select_unfinished_swaps_uuids(conn: &Connection, swap_type: u8) -> SqlResult, SelectSwapsUuidsErr> { const SELECT_UNFINISHED_SWAPS_UUIDS_BY_TYPE: &str = "SELECT uuid FROM my_swaps WHERE is_finished = 0 AND swap_type = :type;"; let mut stmt = conn.prepare(SELECT_UNFINISHED_SWAPS_UUIDS_BY_TYPE)?; diff --git a/mm2src/mm2_main/src/database/stats_nodes.rs b/mm2src/mm2_main/src/database/stats_nodes.rs index 0347e5468f..e5dfdf200c 100644 --- a/mm2src/mm2_main/src/database/stats_nodes.rs +++ b/mm2src/mm2_main/src/database/stats_nodes.rs @@ -1,7 +1,7 @@ /// This module contains code to work with nodes table for stats collection in MM2 SQLite DB use crate::mm2::lp_stats::{NodeInfo, NodeVersionStat}; use common::log::debug; -use db_common::sqlite::rusqlite::{params_from_iter, Error as SqlError, Result as SqlResult}; +use db_common::sqlite::rusqlite::{params_from_iter, Connection, Error as SqlError, Result as SqlResult}; use mm2_core::mm_ctx::MmArc; use std::collections::hash_map::HashMap; @@ -37,39 +37,42 @@ pub fn insert_node_info(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str>) node_info.address.clone(), node_info.peer_id.clone(), ]; - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); - conn.execute(INSERT_NODE, params_from_iter(params.iter())).map(|_| ()) + ctx.run_sql_query(db_id, move |conn| { + conn.execute(INSERT_NODE, params_from_iter(params.iter())).map(|_| ()) + }) } pub fn delete_node_info(ctx: &MmArc, name: String, db_id: Option<&str>) -> SqlResult<()> { debug!("Deleting info about node {} from the SQLite database", name); let params = vec![name]; - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); - conn.execute(DELETE_NODE, params_from_iter(params.iter())).map(|_| ()) + ctx.run_sql_query(db_id, move |conn| { + conn.execute(DELETE_NODE, params_from_iter(params.iter())).map(|_| ()) + }) } -pub fn select_peers_addresses(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); +fn select_peers_addresses_impl(conn: &Connection) -> 
SqlResult, SqlError> { let mut stmt = conn.prepare(SELECT_PEERS_ADDRESSES)?; let peers_addresses = stmt .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? .collect::>>()?; - Ok(peers_addresses) } -pub fn select_peers_names(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); +pub fn select_peers_addresses(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { + ctx.run_sql_query(db_id, move |conn| select_peers_addresses_impl(&conn)) +} + +fn select_peers_names_impl(conn: &Connection) -> SqlResult, SqlError> { let mut stmt = conn.prepare(SELECT_PEERS_NAMES)?; let peers_names = stmt .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? - .collect::>>(); + .collect::>>()?; - peers_names + Ok(peers_names) +} + +pub fn select_peers_names(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { + ctx.run_sql_query(db_id, move |conn| select_peers_names_impl(&conn)) } pub fn insert_node_version_stat(ctx: &MmArc, node_version_stat: NodeVersionStat, db_id: Option<&str>) -> SqlResult<()> { @@ -83,7 +86,8 @@ pub fn insert_node_version_stat(ctx: &MmArc, node_version_stat: NodeVersionStat, node_version_stat.timestamp.to_string(), node_version_stat.error.unwrap_or_default(), ]; - let conn = ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); - conn.execute(INSERT_STAT, params_from_iter(params.iter())).map(|_| ()) + + ctx.run_sql_query(db_id, move |conn| { + conn.execute(INSERT_STAT, params_from_iter(params.iter())).map(|_| ()) + }) } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index c5f526a331..9bae7b9819 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -1580,7 +1580,7 @@ pub struct TakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, - pub db_id: Option, + db_id: Option, } /// Result of match_reserved function @@ -1682,7 +1682,7 @@ impl TakerOrder { fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } - fn db_id(&self) -> Option { self.db_id.clone() } + pub fn db_id(&self) -> Option { self.db_id.clone() } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -1714,7 +1714,7 @@ pub struct MakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) 
p2p_privkey: Option, - pub db_id: Option, + db_id: Option, } pub struct MakerOrderBuilder<'a> { @@ -2104,7 +2104,7 @@ impl MakerOrder { fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } - fn db_id(&self) -> Option { self.db_id.clone() } + pub fn db_id(&self) -> Option { self.db_id.clone() } } impl From for MakerOrder { @@ -4995,7 +4995,7 @@ pub enum TakerOrderCancellationReason { Cancelled, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct MyOrdersFilter { pub order_type: Option, pub initial_action: Option, diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index 4d7a0ea7da..c3adbe4295 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -326,16 +326,18 @@ mod native_impl { paging_options: Option<&PagingOptions>, db_id: Option<&str>, ) -> MyOrdersResult { - let conn = self.ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); - select_orders_by_filter(&conn, filter, paging_options) - .map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) + let filter = filter.clone(); + let paging_options = paging_options.cloned(); + self.ctx.run_sql_query(db_id, move |conn| { + select_orders_by_filter(&conn, &filter, paging_options) + .map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) + }) } async fn select_order_status(&self, uuid: Uuid, db_id: Option<&str>) -> MyOrdersResult { - let conn = self.ctx.sqlite_connection(db_id); - let conn = conn.lock().unwrap(); - select_status_by_uuid(&conn, &uuid).map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) + self.ctx.run_sql_query(db_id, move |conn| { + select_status_by_uuid(&conn, &uuid).map_to_mm(|e| MyOrdersError::ErrorLoading(e.to_string())) + }) } async fn save_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 12e407d2f8..2df2ec1c38 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1027,10 +1027,10 @@ pub async fn insert_new_swap_to_db( #[cfg(not(target_arch = "wasm32"))] fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap, db_id: Option<&str>) { - if let Some(conn) = ctx.sqlite_conn_opt(db_id) { - let conn = conn.lock().unwrap(); - crate::mm2::database::stats_swaps::add_swap_to_index(&conn, swap) - } + let swap = swap.clone(); + ctx.run_sql_query(db_id, move |conn| { + crate::mm2::database::stats_swaps::add_swap_to_index(&conn, &swap) + }); } #[cfg(not(target_arch = "wasm32"))] @@ -1203,7 +1203,7 @@ async fn broadcast_my_swap_status(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) Ok(()) } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct MySwapsFilter { pub my_coin: Option, pub other_coin: Option, diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index e923466c13..b3c2bb0461 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -34,7 +34,7 @@ use uuid::Uuid; cfg_native!( use crate::mm2::database::my_swaps::{insert_new_swap_v2, SELECT_MY_SWAP_V2_BY_UUID}; use common::async_blocking; - use db_common::sqlite::rusqlite::{named_params, Error as SqlError, Result as SqlResult, Row}; + use db_common::sqlite::rusqlite::{named_params, Connection, Error as SqlError, Result as SqlResult, Row}; use 
db_common::sqlite::rusqlite::types::Type as SqlType; ); @@ -128,6 +128,15 @@ pub enum MakerSwapEvent { Completed, } +#[cfg(not(target_arch = "wasm32"))] +fn get_repr_impl(conn: &Connection, id_str: &str) -> SqlResult { + conn.query_row( + SELECT_MY_SWAP_V2_BY_UUID, + &[(":uuid", &id_str)], + MakerSwapDbRepr::from_sql_row, + ) +} + /// Storage for maker swaps. #[derive(Clone)] pub struct MakerSwapStorage { @@ -155,29 +164,31 @@ impl StateMachineStorage for MakerSwapStorage { let ctx = self.ctx.clone(); let db_id = self.db_id.clone(); async_blocking(move || { - let sql_params = named_params! { - ":my_coin": &repr.maker_coin, - ":other_coin": &repr.taker_coin, - ":uuid": repr.uuid.to_string(), - ":started_at": repr.started_at, - ":swap_type": MAKER_SWAP_V2_TYPE, - ":maker_volume": repr.maker_volume.to_fraction_string(), - ":taker_volume": repr.taker_volume.to_fraction_string(), - ":premium": repr.taker_premium.to_fraction_string(), - ":dex_fee": repr.dex_fee_amount.to_fraction_string(), - ":dex_fee_burn": repr.dex_fee_burn.to_fraction_string(), - ":secret": repr.maker_secret.0, - ":secret_hash": repr.maker_secret_hash.0, - ":secret_hash_algo": repr.secret_hash_algo as u8, - ":p2p_privkey": repr.p2p_keypair.map(|k| k.priv_key()).unwrap_or_default(), - ":lock_duration": repr.lock_duration, - ":maker_coin_confs": repr.conf_settings.maker_coin_confs, - ":maker_coin_nota": repr.conf_settings.maker_coin_nota, - ":taker_coin_confs": repr.conf_settings.taker_coin_confs, - ":taker_coin_nota": repr.conf_settings.taker_coin_nota, - ":other_p2p_pub": repr.taker_p2p_pub.to_bytes(), - }; - insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; + ctx.run_sql_query(db_id.as_deref(), move |conn| { + let sql_params = named_params! { + ":my_coin": &repr.maker_coin, + ":other_coin": &repr.taker_coin, + ":uuid": repr.uuid.to_string(), + ":started_at": repr.started_at, + ":swap_type": MAKER_SWAP_V2_TYPE, + ":maker_volume": repr.maker_volume.to_fraction_string(), + ":taker_volume": repr.taker_volume.to_fraction_string(), + ":premium": repr.taker_premium.to_fraction_string(), + ":dex_fee": repr.dex_fee_amount.to_fraction_string(), + ":dex_fee_burn": repr.dex_fee_burn.to_fraction_string(), + ":secret": repr.maker_secret.0, + ":secret_hash": repr.maker_secret_hash.0, + ":secret_hash_algo": repr.secret_hash_algo as u8, + ":p2p_privkey": repr.p2p_keypair.map(|k| k.priv_key()).unwrap_or_default(), + ":lock_duration": repr.lock_duration, + ":maker_coin_confs": repr.conf_settings.maker_coin_confs, + ":maker_coin_nota": repr.conf_settings.maker_coin_nota, + ":taker_coin_confs": repr.conf_settings.taker_coin_confs, + ":taker_coin_nota": repr.conf_settings.taker_coin_nota, + ":other_p2p_pub": repr.taker_p2p_pub.to_bytes(), + }; + insert_new_swap_v2(&conn, sql_params) + })?; Ok(()) }) .await @@ -216,16 +227,8 @@ impl StateMachineStorage for MakerSwapStorage { let id_str = id.to_string(); let db_id = self.db_id.clone(); - async_blocking(move || { - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - Ok(conn.query_row( - SELECT_MY_SWAP_V2_BY_UUID, - &[(":uuid", &id_str)], - MakerSwapDbRepr::from_sql_row, - )?) 
- }) - .await + async_blocking(move || Ok(ctx.run_sql_query(db_id.as_deref(), move |conn| get_repr_impl(&conn, &id_str))?)) + .await } #[cfg(target_arch = "wasm32")] diff --git a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs index 4d77892aea..4a78c89ebb 100644 --- a/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs +++ b/mm2src/mm2_main/src/lp_swap/my_swaps_storage.rs @@ -87,15 +87,18 @@ mod native_impl { swap_type: u8, db_id: Option<&str>, ) -> MySwapsResult<()> { - Ok(insert_new_swap( - &self.ctx, - my_coin, - other_coin, - &uuid.to_string(), - &started_at.to_string(), - swap_type, - db_id, - )?) + let my_coin = my_coin.to_owned(); + let other_coin = other_coin.to_owned(); + Ok(self.ctx.run_sql_query(db_id, move |conn| { + insert_new_swap( + &conn, + &my_coin, + &other_coin, + &uuid.to_string(), + &started_at.to_string(), + swap_type, + ) + })?) } async fn my_recent_swaps_with_filters( @@ -104,9 +107,12 @@ mod native_impl { paging_options: Option<&PagingOptions>, db_id: &str, ) -> MySwapsResult { - let conn = self.ctx.sqlite_connection(Some(db_id)); - let conn = conn.lock().unwrap(); - Ok(select_uuids_by_my_swaps_filter(&conn, filter, paging_options, db_id)?) + let filter = filter.clone(); + let paging_options = paging_options.map(|e| e.to_owned()); + let db_id = db_id.to_owned(); + Ok(self.ctx.run_sql_query(Some(&db_id.clone()), move |conn| { + select_uuids_by_my_swaps_filter(&conn, &filter, paging_options, db_id) + })?) } } } diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index b9027a4702..35e8544d24 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -29,7 +29,7 @@ pub enum SavedSwapError { InternalError(String), } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(tag = "type")] pub enum SavedSwap { Maker(MakerSavedSwap), diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 68e7c86f06..659d1debec 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -106,12 +106,10 @@ pub(super) async fn has_db_record_for( let id_str = id.to_string(); let db_id = db_id.map(|e| e.to_string()); - Ok(async_blocking(move || { - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - does_swap_exist(&conn, &id_str, db_id.as_deref()) - }) - .await?) 
+ Ok( + async_blocking(move || ctx.run_sql_query(db_id.as_deref(), move |conn| does_swap_exist(&conn, &id_str))) + .await?, + ) } #[cfg(target_arch = "wasm32")] @@ -138,20 +136,19 @@ pub(super) async fn store_swap_event( where T::Event: DeserializeOwned + Serialize + Send + 'static, { - let id_str = id.to_string(); let db_id = db_id.map(|e| e.to_string()); - async_blocking(move || { - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - let events_json = get_swap_events(&conn, &id_str, db_id.as_deref())?; + let id_str = id.to_string(); + let events_json = ctx.run_sql_query(db_id.as_deref(), move |conn| get_swap_events(&conn, &id_str))?; let mut events: Vec = serde_json::from_str(&events_json)?; events.push(event); drop_mutability!(events); + let serialized_events = serde_json::to_string(&events)?; - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - update_swap_events(&conn, &id_str, &serialized_events, db_id.as_deref())?; + let id_str = id.to_string(); + ctx.run_sql_query(db_id.as_deref(), move |conn| { + update_swap_events(&conn, &id_str, &serialized_events) + })?; Ok(()) }) .await @@ -213,10 +210,10 @@ pub(super) async fn get_unfinished_swaps_uuids( ) -> MmResult, SwapStateMachineError> { let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - select_unfinished_swaps_uuids(&conn, swap_type, db_id.as_deref()) - .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) + ctx.run_sql_query(db_id.as_deref(), move |conn| { + select_unfinished_swaps_uuids(&conn, swap_type) + .map_to_mm(|e| SwapStateMachineError::StorageError(e.to_string())) + }) }) .await } @@ -247,9 +244,9 @@ pub(super) async fn mark_swap_as_finished( ) -> MmResult<(), SwapStateMachineError> { let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - Ok(set_swap_is_finished(&conn, &id.to_string(), db_id.as_deref())?) + ctx.run_sql_query(db_id.as_deref(), move |conn| { + Ok(set_swap_is_finished(&conn, &id.to_string())?) + }) }) .await } diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index 91737bccf6..2d9db0dee8 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -22,7 +22,7 @@ cfg_native!( use crate::mm2::database::my_swaps::SELECT_MY_SWAP_V2_FOR_RPC_BY_UUID; use common::async_blocking; use db_common::sqlite::query_single_row; - use db_common::sqlite::rusqlite::{Result as SqlResult, Row, Error as SqlError}; + use db_common::sqlite::rusqlite::{Result as SqlResult, Connection, Row, Error as SqlError}; use db_common::sqlite::rusqlite::types::Type as SqlType; ); @@ -42,12 +42,11 @@ pub(super) async fn get_swap_type(ctx: &MmArc, uuid: &Uuid, db_id: Option<&str>) async_blocking(move || { const SELECT_SWAP_TYPE_BY_UUID: &str = "SELECT swap_type FROM my_swaps WHERE uuid = :uuid;"; - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - let maybe_swap_type = query_single_row(&conn, SELECT_SWAP_TYPE_BY_UUID, &[(":uuid", uuid.as_str())], |row| { - row.get(0) - })?; - Ok(maybe_swap_type) + Ok(ctx.run_sql_query(db_id.as_deref(), move |conn| { + query_single_row(&conn, SELECT_SWAP_TYPE_BY_UUID, &[(":uuid", uuid.as_str())], |row| { + row.get(0) + }) + })?) 
}) .await } @@ -168,6 +167,19 @@ pub(super) async fn get_taker_swap_data_for_rpc( get_swap_data_for_rpc_impl(ctx, uuid, db_id).await } +#[cfg(not(target_arch = "wasm32"))] +fn get_swap_data_for_rpc_impl_inner( + conn: &Connection, + uuid: String, +) -> SqlResult>> { + query_single_row( + conn, + SELECT_MY_SWAP_V2_FOR_RPC_BY_UUID, + &[(":uuid", uuid.as_str())], + MySwapForRpc::from_row, + ) +} + #[cfg(not(target_arch = "wasm32"))] async fn get_swap_data_for_rpc_impl( ctx: &MmArc, @@ -179,15 +191,9 @@ async fn get_swap_data_for_rpc_impl( let db_id = db_id.map(|e| e.to_string()); async_blocking(move || { - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - let swap_data = query_single_row( - &conn, - SELECT_MY_SWAP_V2_FOR_RPC_BY_UUID, - &[(":uuid", uuid.as_str())], - MySwapForRpc::from_row, - )?; - Ok(swap_data) + Ok(ctx.run_sql_query(db_id.as_deref(), move |conn| { + get_swap_data_for_rpc_impl_inner(&conn, uuid) + })?) }) .await } diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 09ff0b33c0..f19773fe3d 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -35,7 +35,7 @@ use uuid::Uuid; cfg_native!( use crate::mm2::database::my_swaps::{insert_new_swap_v2, SELECT_MY_SWAP_V2_BY_UUID}; use common::async_blocking; - use db_common::sqlite::rusqlite::{named_params, Error as SqlError, Result as SqlResult, Row}; + use db_common::sqlite::rusqlite::{named_params, Connection, Error as SqlError, Result as SqlResult, Row}; use db_common::sqlite::rusqlite::types::Type as SqlType; ); @@ -160,6 +160,15 @@ pub enum TakerSwapEvent { Completed, } +#[cfg(not(target_arch = "wasm32"))] +fn get_repr_impl(conn: &Connection, id_str: &str) -> SqlResult { + conn.query_row( + SELECT_MY_SWAP_V2_BY_UUID, + &[(":uuid", &id_str)], + TakerSwapDbRepr::from_sql_row, + ) +} + /// Storage for taker swaps. #[derive(Clone)] pub struct TakerSwapStorage { @@ -187,29 +196,31 @@ impl StateMachineStorage for TakerSwapStorage { let ctx = self.ctx.clone(); let db_id = self.db_id.clone(); async_blocking(move || { - let sql_params = named_params! { - ":my_coin": repr.taker_coin, - ":other_coin": repr.maker_coin, - ":uuid": repr.uuid.to_string(), - ":started_at": repr.started_at, - ":swap_type": TAKER_SWAP_V2_TYPE, - ":maker_volume": repr.maker_volume.to_fraction_string(), - ":taker_volume": repr.taker_volume.to_fraction_string(), - ":premium": repr.taker_premium.to_fraction_string(), - ":dex_fee": repr.dex_fee_amount.to_fraction_string(), - ":dex_fee_burn": repr.dex_fee_burn.to_fraction_string(), - ":secret": repr.taker_secret.0, - ":secret_hash": repr.taker_secret_hash.0, - ":secret_hash_algo": repr.secret_hash_algo as u8, - ":p2p_privkey": repr.p2p_keypair.map(|k| k.priv_key()).unwrap_or_default(), - ":lock_duration": repr.lock_duration, - ":maker_coin_confs": repr.conf_settings.maker_coin_confs, - ":maker_coin_nota": repr.conf_settings.maker_coin_nota, - ":taker_coin_confs": repr.conf_settings.taker_coin_confs, - ":taker_coin_nota": repr.conf_settings.taker_coin_nota, - ":other_p2p_pub": repr.maker_p2p_pub.to_bytes(), - }; - insert_new_swap_v2(&ctx, sql_params, db_id.as_deref())?; + ctx.run_sql_query(db_id.as_deref(), move |conn| { + let sql_params = named_params! 
{ + ":my_coin": repr.taker_coin, + ":other_coin": repr.maker_coin, + ":uuid": repr.uuid.to_string(), + ":started_at": repr.started_at, + ":swap_type": TAKER_SWAP_V2_TYPE, + ":maker_volume": repr.maker_volume.to_fraction_string(), + ":taker_volume": repr.taker_volume.to_fraction_string(), + ":premium": repr.taker_premium.to_fraction_string(), + ":dex_fee": repr.dex_fee_amount.to_fraction_string(), + ":dex_fee_burn": repr.dex_fee_burn.to_fraction_string(), + ":secret": repr.taker_secret.0, + ":secret_hash": repr.taker_secret_hash.0, + ":secret_hash_algo": repr.secret_hash_algo as u8, + ":p2p_privkey": repr.p2p_keypair.map(|k| k.priv_key()).unwrap_or_default(), + ":lock_duration": repr.lock_duration, + ":maker_coin_confs": repr.conf_settings.maker_coin_confs, + ":maker_coin_nota": repr.conf_settings.maker_coin_nota, + ":taker_coin_confs": repr.conf_settings.taker_coin_confs, + ":taker_coin_nota": repr.conf_settings.taker_coin_nota, + ":other_p2p_pub": repr.maker_p2p_pub.to_bytes(), + }; + insert_new_swap_v2(&conn, sql_params) + })?; Ok(()) }) .await @@ -248,16 +259,8 @@ impl StateMachineStorage for TakerSwapStorage { let id_str = id.to_string(); let db_id = self.db_id.clone(); - async_blocking(move || { - let conn = ctx.sqlite_connection(db_id.as_deref()); - let conn = conn.lock().unwrap(); - Ok(conn.query_row( - SELECT_MY_SWAP_V2_BY_UUID, - &[(":uuid", &id_str)], - TakerSwapDbRepr::from_sql_row, - )?) - }) - .await + async_blocking(move || Ok(ctx.run_sql_query(db_id.as_deref(), move |conn| get_repr_impl(&conn, &id_str))?)) + .await } #[cfg(target_arch = "wasm32")] From 3cfd616a33d653766be4671b6e854ae644783fef Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 29 May 2024 19:33:19 +0100 Subject: [PATCH 134/186] cargo clippy --- mm2src/mm2_core/src/mm_ctx.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index d746815a58..f58831a813 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -35,6 +35,7 @@ cfg_native! { use mm2_metrics::MmMetricsError; use std::net::{IpAddr, SocketAddr, AddrParseError}; use std::path::{Path, PathBuf}; + use std::sync::MutexGuard; } /// Default interval to export and record metrics to log. 
From 0b5275c0f40fe0381e38723e1f81e4d43551f5ac Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 30 May 2024 03:34:11 +0100 Subject: [PATCH 135/186] remove todos after review --- mm2src/mm2_main/src/lp_ordermatch.rs | 4 ++-- mm2src/mm2_main/src/lp_swap.rs | 1 - mm2src/mm2_main/src/lp_swap/pubkey_banning.rs | 2 -- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 2 -- mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs | 1 - 5 files changed, 2 insertions(+), 8 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 9bae7b9819..e444a5daea 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2128,7 +2128,7 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.base_orderbook_ticker, rel_orderbook_ticker: taker_order.rel_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, - db_id: None, // TODO, + db_id: taker_order.db_id(), }, // The "buy" taker order is recreated with reversed pair as Maker order is always considered as "sell" TakerAction::Buy => { @@ -2151,7 +2151,7 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.rel_orderbook_ticker, rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, - db_id: None, // TODO, + db_id: taker_order.db_id(), } }, } diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 2df2ec1c38..a147204a39 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1869,7 +1869,6 @@ async fn recv_swap_v2_msg( let wait_until = started + timeout; loop { Timer::sleep(1.).await; - // TODO: db_id let swap_ctx = SwapsContext::from_ctx(&ctx).unwrap(); let mut msgs = swap_ctx.swap_v2_msgs.lock().unwrap(); if let Some(msg_store) = msgs.get_mut(uuid) { diff --git a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs index 4a628a59d9..0c44c745ad 100644 --- a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs +++ b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs @@ -21,7 +21,6 @@ pub enum BanReason { } pub fn ban_pubkey_on_failed_swap(ctx: &MmArc, pubkey: H256, swap_uuid: &Uuid, event: SwapEvent) { - // TODO: db_id let ctx = SwapsContext::from_ctx(ctx).unwrap(); let mut banned = ctx.banned_pubkeys.lock().unwrap(); banned.insert(pubkey.into(), BanReason::FailedSwap { @@ -31,7 +30,6 @@ pub fn ban_pubkey_on_failed_swap(ctx: &MmArc, pubkey: H256, swap_uuid: &Uuid, ev } pub fn is_pubkey_banned(ctx: &MmArc, pubkey: &H256Json) -> bool { - // TODO: db_id let ctx = SwapsContext::from_ctx(ctx).unwrap(); let banned = ctx.banned_pubkeys.lock().unwrap(); banned.contains_key(pubkey) diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs index 659d1debec..dbd971592a 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_common.rs @@ -272,7 +272,6 @@ pub(super) async fn mark_swap_as_finished( pub(super) fn init_additional_context_impl(ctx: &MmArc, swap_info: ActiveSwapV2Info, other_p2p_pubkey: PublicKey) { subscribe_to_topic(ctx, swap_v2_topic(&swap_info.uuid)); - // TODO: db_id let swap_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); swap_ctx.init_msg_v2_store(swap_info.uuid, other_p2p_pubkey); swap_ctx @@ -284,7 +283,6 @@ pub(super) fn init_additional_context_impl(ctx: &MmArc, swap_info: ActiveSwapV2I pub(super) fn clean_up_context_impl(ctx: &MmArc, uuid: &Uuid, maker_coin: &str, taker_coin: &str) { 
unsubscribe_from_topic(ctx, swap_v2_topic(uuid)); - // TODO: db_id let swap_ctx = SwapsContext::from_ctx(ctx).expect("SwapsContext::from_ctx should not fail"); swap_ctx.remove_msg_v2_store(uuid); swap_ctx.active_swaps_v2_infos.lock().unwrap().remove(uuid); diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs index 2bbb91ab4a..6d632abdee 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs @@ -56,7 +56,6 @@ pub struct GetSharedDbIdResponse { shared_db_id: H160Json, } -// TODO: Return shared_db_id for all available and active unique pubkeys pub async fn get_shared_db_id(ctx: MmArc, _req: Json) -> GetSharedDbIdResult { let shared_db_id = ctx.shared_db_id().to_owned().into(); Ok(GetSharedDbIdResponse { shared_db_id }) From a53bd38c86894c9d8f5cc6b994f8da1345c4c418 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 30 May 2024 10:47:11 +0100 Subject: [PATCH 136/186] fix build error --- mm2src/mm2_main/src/lp_ordermatch.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index e444a5daea..964a591b95 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -2128,7 +2128,7 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.base_orderbook_ticker, rel_orderbook_ticker: taker_order.rel_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, - db_id: taker_order.db_id(), + db_id: taker_order.db_id, }, // The "buy" taker order is recreated with reversed pair as Maker order is always considered as "sell" TakerAction::Buy => { @@ -2151,7 +2151,7 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.rel_orderbook_ticker, rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, - db_id: taker_order.db_id(), + db_id: taker_order.db_id, } }, } From ff42791accc43387a6c41362b9ef0601e5ac9b4c Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 4 Jun 2024 12:52:13 +0100 Subject: [PATCH 137/186] revert hd_wallet storage changes --- mm2src/coins/hd_wallet/storage/mod.rs | 62 ++++--------------- .../coins/hd_wallet/storage/sqlite_storage.rs | 4 +- .../coins/hd_wallet/storage/wasm_storage.rs | 4 +- mm2src/mm2_core/src/sql_connection_pool.rs | 2 +- mm2src/mm2_test_helpers/src/for_tests.rs | 14 ----- 5 files changed, 18 insertions(+), 68 deletions(-) diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index e7aa7bffbc..5fa96b9a42 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -220,13 +220,13 @@ impl Default for HDWalletCoinStorage { } impl HDWalletCoinStorage { + // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? 
or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init(ctx: &MmArc, coin: String) -> HDWalletStorageResult { let crypto_ctx = CryptoCtx::from_ctx(ctx)?; let hd_wallet_rmd160 = crypto_ctx .hw_wallet_rmd160() .or_mm_err(|| HDWalletStorageError::HDWalletUnavailable)?; - let db_id = hex::encode(hd_wallet_rmd160.as_slice()); - let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); + let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -234,14 +234,13 @@ impl HDWalletCoinStorage { }) } + // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init_with_rmd160( ctx: &MmArc, coin: String, hd_wallet_rmd160: H160, ) -> HDWalletStorageResult { - let db_id = hex::encode(hd_wallet_rmd160.as_slice()); - let inner = Box::new(HDWalletStorageInstance::init(ctx, Some(&db_id)).await?); - + let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -300,7 +299,7 @@ fn display_rmd160(rmd160: &H160) -> String { hex::encode(rmd160.deref()) } mod tests { use super::*; use itertools::Itertools; - use mm2_test_helpers::for_tests::{add_custom_db, mm_ctx_with_custom_db}; + use mm2_test_helpers::for_tests::mm_ctx_with_custom_db; use primitives::hash::H160; cfg_wasm32! { @@ -343,12 +342,7 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device0_rmd160 = H160::from("0000000000000000000000000000000000000020"); - let device0_rmd160_db_id = hex::encode(device0_rmd160.as_slice()); - add_custom_db(&ctx, device0_rmd160_db_id.clone()); - let device1_rmd160 = H160::from("0000000000000000000000000000000000000030"); - let device1_rmd160_db_id = hex::encode(device1_rmd160.as_slice()); - add_custom_db(&ctx, device1_rmd160_db_id.clone()); let rick_device0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await @@ -377,8 +371,9 @@ mod tests { .await .expect("!HDWalletCoinStorage::upload_new_account: MORTY device=0 account=0"); - // All accounts must be in only device0_rmd160_db_id database. - let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device0_rmd160_db_id)) + // All accounts must be in the only one database. + // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values. + let all_accounts: Vec<_> = get_all_storage_items(&ctx) .await .into_iter() .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) @@ -386,15 +381,9 @@ mod tests { assert_eq!(all_accounts, vec![ rick_device0_account0.clone(), rick_device0_account1.clone(), + rick_device1_account0.clone(), morty_device0_account0.clone() ]); - // All accounts must be in only device1_rmd160_db_id database. 
- let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device1_rmd160_db_id)) - .await - .into_iter() - .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) - .collect(); - assert_eq!(all_accounts, vec![rick_device1_account0.clone(),]); let mut actual = rick_device0_db .load_all_accounts() @@ -444,16 +433,8 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device0_rmd160 = H160::from("0000000000000000000000000000000000000010"); - let device0_rmd160_db_id = hex::encode(device0_rmd160.as_slice()); - add_custom_db(&ctx, device0_rmd160_db_id.clone()); - let device1_rmd160 = H160::from("0000000000000000000000000000000000000020"); - let device1_rmd160_db_id = hex::encode(device1_rmd160.as_slice()); - add_custom_db(&ctx, device1_rmd160_db_id.clone()); - let device2_rmd160 = H160::from("0000000000000000000000000000000000000030"); - let device2_rmd160_db_id = hex::encode(device2_rmd160.as_slice()); - add_custom_db(&ctx, device2_rmd160_db_id.clone()); let wallet0_db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device0_rmd160) .await @@ -487,29 +468,14 @@ mod tests { .await .expect("HDWalletCoinStorage::clear_accounts: RICK wallet=0"); - // device0 database should return no account. - let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device0_rmd160_db_id)) - .await - .into_iter() - .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) - .collect(); - assert_eq!(all_accounts, vec![]); - - // All accounts must be in only device1 database . - let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device1_rmd160_db_id)) - .await - .into_iter() - .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) - .collect(); - assert_eq!(all_accounts, vec![wallet1_account0]); - - // All accounts must be in only device2 database . - let all_accounts: Vec<_> = get_all_storage_items(&ctx, Some(&device2_rmd160_db_id)) + // All accounts must be in the only one database. + // Rows in the database must differ by only `coin`, `hd_wallet_rmd160` and `account_id` values. + let all_accounts: Vec<_> = get_all_storage_items(&ctx) .await .into_iter() .sorted_by(|x, y| x.external_addresses_number.cmp(&y.external_addresses_number)) .collect(); - assert_eq!(all_accounts, vec![wallet2_account0]); + assert_eq!(all_accounts, vec![wallet1_account0, wallet2_account0]); } async fn test_update_account_impl() { @@ -528,8 +494,6 @@ mod tests { let ctx = mm_ctx_with_custom_db(); let device_rmd160 = H160::from("0000000000000000000000000000000000000010"); - let device_rmd160_db_id = hex::encode(device_rmd160.as_slice()); - add_custom_db(&ctx, device_rmd160_db_id.clone()); let db = HDWalletCoinStorage::init_with_rmd160(&ctx, "RICK".to_owned(), device_rmd160) .await diff --git a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs index e2afa49cae..911b0f9518 100644 --- a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs +++ b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs @@ -275,11 +275,11 @@ enum UpdatingProperty { /// This function is used in `hd_wallet_storage::tests`. 
#[cfg(test)] -pub(crate) async fn get_all_storage_items(ctx: &MmArc, db_id: Option<&str>) -> Vec { +pub(crate) async fn get_all_storage_items(ctx: &MmArc) -> Vec { const SELECT_ALL_ACCOUNTS: &str = "SELECT account_id, account_xpub, external_addresses_number, internal_addresses_number FROM hd_account"; - let conn = ctx.shared_sqlite_conn_opt(db_id).unwrap(); + let conn = ctx.shared_sqlite_conn_opt(None).unwrap(); let conn = conn.lock().unwrap(); let mut statement = conn.prepare(SELECT_ALL_ACCOUNTS).unwrap(); statement diff --git a/mm2src/coins/hd_wallet/storage/wasm_storage.rs b/mm2src/coins/hd_wallet/storage/wasm_storage.rs index c8638f37c8..f8d75c044b 100644 --- a/mm2src/coins/hd_wallet/storage/wasm_storage.rs +++ b/mm2src/coins/hd_wallet/storage/wasm_storage.rs @@ -321,9 +321,9 @@ impl HDWalletIndexedDbStorage { /// This function is used in `hd_wallet_storage::tests`. #[cfg(any(test, target_arch = "wasm32"))] -pub(super) async fn get_all_storage_items(ctx: &MmArc, db_id: Option<&str>) -> Vec { +pub(super) async fn get_all_storage_items(ctx: &MmArc) -> Vec { let coins_ctx = CoinsContext::from_ctx(ctx).unwrap(); - let db = coins_ctx.hd_wallet_db.get_or_initialize(db_id).await.unwrap(); + let db = coins_ctx.hd_wallet_db.get_or_initialize(None).await.unwrap(); let transaction = db.inner.transaction().await.unwrap(); let table = transaction.table::().await.unwrap(); table diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 1d1293303b..0a4b78d9d9 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -24,7 +24,7 @@ enum DbIdConnKind { /// A pool for managing SQLite connections, where each connection is keyed by a unique string identifier. #[derive(Clone)] pub struct SqliteConnPool { - pub connections: Arc>>>>, + connections: Arc>>>>, // default db_id rmd160_hex: String, // default shared_db_id diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index a7735f1407..1fa45039a4 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -1102,20 +1102,6 @@ pub fn mm_ctx_with_custom_db_with_conf(conf: Option) -> MmArc { ctx } -#[cfg(target_arch = "wasm32")] -pub fn add_custom_db(_ctx: &MmArc, _db_id: String) {} - -#[cfg(not(target_arch = "wasm32"))] -pub fn add_custom_db(ctx: &MmArc, db_id: String) { - use db_common::sqlite::rusqlite::Connection; - use std::sync::Arc; - - let connections = ctx.sqlite_conn_pool.as_option().expect("db pool not initialized!"); - let mut connections = connections.connections.write().unwrap(); - connections.insert(db_id, Arc::new(Mutex::new(Connection::open_in_memory().unwrap()))); - drop(connections); -} - #[cfg(not(target_arch = "wasm32"))] pub async fn mm_ctx_with_custom_async_db() -> MmArc { let ctx = MmCtxBuilder::new().into_mm_arc(); From 4b99dbad136dbfb60d1805d8a391aff5f7ef33f3 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 7 Jun 2024 00:16:31 +0100 Subject: [PATCH 138/186] impl account_db_id for eth and make account_db_id async --- mm2src/coins/eth.rs | 43 +++-- mm2src/coins/lightning/ln_utils.rs | 2 +- mm2src/coins/lp_coins.rs | 14 +- mm2src/coins/my_tx_history_v2.rs | 2 +- mm2src/coins/nft.rs | 4 +- mm2src/coins/tendermint/tendermint_coin.rs | 2 +- .../utxo/utxo_builder/utxo_coin_builder.rs | 2 +- .../src/platform_coin_with_tokens.rs | 2 +- .../standalone_coin/init_standalone_coin.rs | 2 +- mm2src/mm2_main/src/lp_ordermatch.rs | 24 +-- 
mm2src/mm2_main/src/lp_swap/maker_swap.rs | 13 +- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 2 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 13 +- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 2 +- mm2src/mm2_main/src/ordermatch_tests.rs | 152 +++++++++++------- 15 files changed, 165 insertions(+), 114 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index b7bcfc51e5..ff1303c662 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -2823,7 +2823,8 @@ impl EthCoin { }, }; - let mut saved_traces = match self.load_saved_traces(ctx, my_address, self.account_db_id().as_deref()) { + let mut saved_traces = match self.load_saved_traces(ctx, my_address, self.account_db_id().await.as_deref()) + { Some(traces) => traces, None => SavedTraces { traces: vec![], @@ -2913,7 +2914,7 @@ impl EthCoin { } else { 0.into() }; - self.store_eth_traces(ctx, my_address, &saved_traces, self.account_db_id().as_deref()); + self.store_eth_traces(ctx, my_address, &saved_traces, self.account_db_id().await.as_deref()); } if current_block > saved_traces.latest_block { @@ -2969,7 +2970,7 @@ impl EthCoin { saved_traces.traces.extend(to_traces_after_latest); saved_traces.latest_block = current_block; - self.store_eth_traces(ctx, my_address, &saved_traces, self.account_db_id().as_deref()); + self.store_eth_traces(ctx, my_address, &saved_traces, self.account_db_id().await.as_deref()); } saved_traces.traces.sort_by(|a, b| b.block_number.cmp(&a.block_number)); for trace in saved_traces.traces { @@ -3197,15 +3198,15 @@ impl EthCoin { }, }; - let mut saved_events = match self.load_saved_erc20_events(ctx, my_address, self.account_db_id().as_deref()) - { - Some(events) => events, - None => SavedErc20Events { - events: vec![], - earliest_block: current_block, - latest_block: current_block, - }, - }; + let mut saved_events = + match self.load_saved_erc20_events(ctx, my_address, self.account_db_id().await.as_deref()) { + Some(events) => events, + None => SavedErc20Events { + events: vec![], + earliest_block: current_block, + latest_block: current_block, + }, + }; *self.history_sync_state.lock().unwrap() = HistorySyncState::InProgress(json!({ "blocks_left": saved_events.earliest_block, })); @@ -3277,7 +3278,7 @@ impl EthCoin { } else { 0.into() }; - self.store_erc20_events(ctx, my_address, &saved_events, self.account_db_id().as_deref()); + self.store_erc20_events(ctx, my_address, &saved_events, self.account_db_id().await.as_deref()); } if current_block > saved_events.latest_block { @@ -3334,7 +3335,7 @@ impl EthCoin { saved_events.events.extend(from_events_after_latest); saved_events.events.extend(to_events_after_latest); saved_events.latest_block = current_block; - self.store_erc20_events(ctx, my_address, &saved_events, self.account_db_id().as_deref()); + self.store_erc20_events(ctx, my_address, &saved_events, self.account_db_id().await.as_deref()); } let all_events: HashMap<_, _> = saved_events @@ -5723,6 +5724,20 @@ impl MmCoin for EthCoin { tokens.remove(ticker); }; } + + async fn account_db_id(&self) -> Option { + let derivation_method = &self.deref().derivation_method; + match derivation_method.as_ref() { + DerivationMethod::SingleAddress(single) => return Some(hex::encode(single.as_bytes())), + DerivationMethod::HDWallet(hd_wallet) => { + if let Some(addr) = hd_wallet.get_enabled_address().await { + return Some(hex::encode(addr.address.as_bytes())); + } + }, + } + + None + } } pub trait TryToAddress { diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 
b3eb308b80..02e59112ee 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -77,7 +77,7 @@ pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult Box + Send>; - fn account_db_id(&self) -> Option { None } + async fn account_db_id(&self) -> Option { None } fn account_shared_db_id(&self) -> Option { None } @@ -3218,7 +3218,8 @@ pub trait MmCoin: // BCH cash address format has colon after prefix, e.g. bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - ctx.dbdir(self.account_db_id().as_deref()) + let db_id = block_on(self.account_db_id()); + ctx.dbdir(db_id.as_deref()) .join("TRANSACTIONS") .join(format!("{}_{}.json", self.ticker(), my_address)) } @@ -3230,7 +3231,8 @@ pub trait MmCoin: // BCH cash address format has colon after prefix, e.g. bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - ctx.dbdir(self.account_db_id().as_deref()) + let db_id = block_on(self.account_db_id()); + ctx.dbdir(db_id.as_deref()) .join("TRANSACTIONS") .join(format!("{}_{}_migration", self.ticker(), my_address)) } @@ -4588,7 +4590,7 @@ async fn find_unique_account_ids(ctx: &MmArc, active_only: bool) -> Result Result>, S } /// `get_trade_fee` rpc implementation. -/// There is some consideration about this rpc: +/// There is some consideration about this rpc: /// for eth coin this rpc returns max possible trade fee (estimated for maximum possible gas limit for any kind of swap). /// However for eth coin, as part of fixing this issue https://github.com/KomodoPlatform/komodo-defi-framework/issues/1848, /// `max_taker_vol' and `trade_preimage` rpc now return more accurate required gas calculations. 
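As a quick orientation for the `account_db_id` change above: the trait method is now async (the eth implementation awaits the HD wallet's enabled address), so async call sites await it and fall back to the node default, while the few sync call sites in this patch wrap it in `common::block_on`. Below is a hedged sketch of the async pattern mirrored from the nft.rs hunks; `swaps_db_id_for` is a hypothetical helper, not part of the patch.

use coins::{MmCoin, MmCoinEnum};
use mm2_core::mm_ctx::MmArc;

// Resolve the per-account DB id for a coin, defaulting to the node's rmd160 hex,
// exactly as the nft.rs call sites in this patch do.
async fn swaps_db_id_for(ctx: &MmArc, coin: &MmCoinEnum) -> String {
    coin.account_db_id().await.unwrap_or_else(|| ctx.rmd160_hex())
}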
diff --git a/mm2src/coins/my_tx_history_v2.rs b/mm2src/coins/my_tx_history_v2.rs index f9ed3ff2b2..039c4d2a41 100644 --- a/mm2src/coins/my_tx_history_v2.rs +++ b/mm2src/coins/my_tx_history_v2.rs @@ -402,7 +402,7 @@ pub(crate) async fn my_tx_history_v2_impl( where Coin: CoinWithTxHistoryV2 + MmCoin, { - let tx_history_storage = TxHistoryStorageBuilder::new(&ctx, coin.account_db_id()).build()?; + let tx_history_storage = TxHistoryStorageBuilder::new(&ctx, coin.account_db_id().await).build()?; let wallet_id = coin.history_wallet_id(); let is_storage_init = tx_history_storage.is_initialized_for(&wallet_id).await?; diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index fb855880b5..9a92eedc91 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -1653,7 +1653,7 @@ pub async fn find_unique_nft_account_ids( for coin in coins.iter() { if coin.is_available() { // Use default if no db_id - let db_id = coin.inner.account_db_id().unwrap_or_else(|| ctx.rmd160_hex()); + let db_id = coin.inner.account_db_id().await.unwrap_or_else(|| ctx.rmd160_hex()); let entry = active_id_chains.entry(db_id).or_insert_with(Vec::new); if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { if chains.contains(&chain) { @@ -1674,7 +1674,7 @@ pub async fn find_nft_account_id_for_chain(ctx: &MmArc, chains: Chain) -> Result for coin in coins.iter() { if coin.is_available() { // Use default if no db_id - let db_id = coin.inner.account_db_id().unwrap_or_else(|| ctx.rmd160_hex()); + let db_id = coin.inner.account_db_id().await.unwrap_or_else(|| ctx.rmd160_hex()); if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { if chains == chain { return Ok(Some((db_id, chain))); diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 57117f16a8..0d96a57072 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2284,7 +2284,7 @@ impl MmCoin for TendermintCoin { fn on_token_deactivated(&self, _ticker: &str) {} - fn account_db_id(&self) -> Option { + async fn account_db_id(&self) -> Option { if let Ok(public_key) = self.activation_policy.public_key() { let address_hash = dhash160(&public_key.to_bytes()); let address_rmd160_hex = hex::encode(address_hash.as_slice()); diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 5c00eba000..55ae91e71e 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -607,7 +607,7 @@ pub trait UtxoCoinBuilderCommonOps { .await .map_to_mm(UtxoCoinBuildError::Internal)? 
{ - Some(coin) => coin.inner.account_db_id(), + Some(coin) => coin.inner.account_db_id().await, None => None, }; let storage_ticker = self.ticker().replace('-', "_"); diff --git a/mm2src/coins_activation/src/platform_coin_with_tokens.rs b/mm2src/coins_activation/src/platform_coin_with_tokens.rs index c4a01706bc..0b97403dce 100644 --- a/mm2src/coins_activation/src/platform_coin_with_tokens.rs +++ b/mm2src/coins_activation/src/platform_coin_with_tokens.rs @@ -476,7 +476,7 @@ where if req.request.tx_history() { platform_coin.start_history_background_fetching( ctx.clone(), - TxHistoryStorageBuilder::new(&ctx, platform_coin.clone().into().account_db_id()).build()?, + TxHistoryStorageBuilder::new(&ctx, platform_coin.clone().into().account_db_id().await).build()?, activation_result.get_platform_balance(), ); } diff --git a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs index 4d75b3fa00..9a265680cb 100644 --- a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs +++ b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs @@ -223,7 +223,7 @@ where .await?; coin.start_history_background_fetching( self.ctx.metrics.clone(), - TxHistoryStorageBuilder::new(&self.ctx, coin_clone.into().account_db_id()).build()?, + TxHistoryStorageBuilder::new(&self.ctx, coin_clone.into().account_db_id().await).build()?, current_balances, ); } diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 964a591b95..58ae8c95ac 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -1422,7 +1422,7 @@ impl<'a> TakerOrderBuilder<'a> { /// Validate fields and build #[allow(clippy::result_large_err)] - pub fn build(self) -> Result { + pub async fn build(self) -> Result { let min_base_amount = self.base_coin.min_trading_vol(); let min_rel_amount = self.rel_coin.min_trading_vol(); @@ -1517,13 +1517,13 @@ impl<'a> TakerOrderBuilder<'a> { base_orderbook_ticker: self.base_orderbook_ticker, rel_orderbook_ticker: self.rel_orderbook_ticker, p2p_privkey, - db_id: self.base_coin.account_db_id(), + db_id: self.base_coin.account_db_id().await, }) } #[cfg(test)] /// skip validation for tests - fn build_unchecked(self) -> TakerOrder { + async fn build_unchecked(self) -> TakerOrder { let base_protocol_info = match &self.action { TakerAction::Buy => self.base_coin.coin_protocol_info(Some(self.base_amount.clone())), TakerAction::Sell => self.base_coin.coin_protocol_info(None), @@ -1558,7 +1558,7 @@ impl<'a> TakerOrderBuilder<'a> { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: self.base_coin.account_db_id(), + db_id: self.base_coin.account_db_id().await, } } } @@ -1916,7 +1916,7 @@ impl<'a> MakerOrderBuilder<'a> { /// Build MakerOrder #[allow(clippy::result_large_err)] - pub fn build(self) -> Result { + pub async fn build(self) -> Result { if self.base_coin.ticker() == self.rel_coin.ticker() { return Err(MakerOrderBuildError::BaseEqualRel); } @@ -1970,12 +1970,12 @@ impl<'a> MakerOrderBuilder<'a> { base_orderbook_ticker: self.base_orderbook_ticker, rel_orderbook_ticker: self.rel_orderbook_ticker, p2p_privkey, - db_id: self.base_coin.account_db_id(), + db_id: self.base_coin.account_db_id().await, }) } #[cfg(test)] - fn build_unchecked(self) -> MakerOrder { + async fn build_unchecked(self) -> MakerOrder { let created_at = now_ms(); #[allow(clippy::or_fun_call)] MakerOrder { @@ -1995,7 +1995,7 @@ impl<'a> 
MakerOrderBuilder<'a> { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: self.base_coin.account_db_id(), + db_id: self.base_coin.account_db_id().await, } } } @@ -2986,7 +2986,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO }, }; - let account_db_id = maker_coin.account_db_id(); + let account_db_id = maker_coin.account_db_id().await; if ctx.use_trading_proto_v2() { let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); match (maker_coin, taker_coin) { @@ -3139,7 +3139,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat ); let now = now_sec(); - let account_db_id = taker_coin.account_db_id(); + let account_db_id = taker_coin.account_db_id().await; if ctx.use_trading_proto_v2() { let taker_secret = match generate_secret() { Ok(s) => s.into(), @@ -3960,7 +3960,7 @@ pub async fn lp_auto_buy( if let Some(timeout) = input.timeout { order_builder = order_builder.with_timeout(timeout); } - let order = try_s!(order_builder.build()); + let order = try_s!(order_builder.build().await); let request_orderbook = false; try_s!( @@ -4703,7 +4703,7 @@ pub async fn create_maker_order(ctx: &MmArc, req: SetPriceReq) -> Result, uuid: &Uuid) } async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSavedEvent) -> Result<(), String> { - let db_id = swap.maker_coin.account_db_id(); + let db_id = swap.db_id(); let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Maker(MakerSavedSwap { @@ -282,7 +282,7 @@ impl MakerSwap { } #[inline] - fn db_id(&self) -> Option { self.maker_coin.account_db_id() } + fn db_id(&self) -> Option { self.r().data.db_id.clone() } fn apply_event(&self, event: MakerSwapEvent) { match event { @@ -579,7 +579,7 @@ impl MakerSwap { maker_coin_htlc_pubkey: Some(maker_coin_htlc_pubkey.as_slice().into()), taker_coin_htlc_pubkey: Some(taker_coin_htlc_pubkey.as_slice().into()), p2p_privkey: self.p2p_privkey.map(SerializableSecp256k1Keypair::from), - db_id: self.db_id(), + db_id: self.maker_coin.account_db_id().await, }; // This will be done during order match @@ -2034,10 +2034,10 @@ impl RunMakerSwapInput { } } - fn db_id(&self) -> Option { + async fn db_id(&self) -> Option { match self { RunMakerSwapInput::StartNew(swap) => swap.db_id(), - RunMakerSwapInput::KickStart { maker_coin, .. } => maker_coin.account_db_id(), + RunMakerSwapInput::KickStart { maker_coin, .. } => maker_coin.account_db_id().await, } } } @@ -2048,7 +2048,7 @@ impl RunMakerSwapInput { /// Every produced event is saved to local DB. Swap status is broadcasted to P2P network after completion. 
pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { let uuid = swap.uuid().to_owned(); - let db_id = swap.db_id().to_owned(); + let db_id = swap.db_id().await.to_owned(); let mut attempts = 0; let swap_lock = loop { match SwapLock::lock(&ctx, uuid, 40., db_id.as_deref()).await { @@ -2309,6 +2309,7 @@ pub async fn maker_swap_trade_preimage( // perform an additional validation let _order = builder .build() + .await .map_to_mm(|e| TradePreimageRpcError::from_maker_order_build_error(e, base_coin_ticker, rel_coin_ticker))?; let volume = if req.max { Some(volume) } else { None }; diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index b3c2bb0461..8ee09aafa5 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -661,7 +661,7 @@ impl Result { - acquire_reentrancy_lock_impl(&self.ctx, self.uuid, self.maker_coin.account_db_id().as_deref()).await + acquire_reentrancy_lock_impl(&self.ctx, self.uuid, self.maker_coin.account_db_id().await.as_deref()).await } fn spawn_reentrancy_lock_renew(&mut self, guard: Self::ReentrancyLock) { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 324877e9a7..39e966a700 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -108,7 +108,7 @@ pub fn stats_taker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) } async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSavedEvent) -> Result<(), String> { - let db_id = swap.taker_coin.account_db_id(); + let db_id = swap.db_id(); let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Taker(TakerSavedSwap { @@ -381,10 +381,10 @@ impl RunTakerSwapInput { } } - fn db_id(&self) -> Option { + async fn db_id(&self) -> Option { match self { RunTakerSwapInput::StartNew(swap) => swap.db_id(), - RunTakerSwapInput::KickStart { taker_coin, .. } => taker_coin.account_db_id(), + RunTakerSwapInput::KickStart { taker_coin, .. } => taker_coin.account_db_id().await, } } } @@ -395,7 +395,7 @@ impl RunTakerSwapInput { /// Every produced event is saved to local DB. Swap status is broadcast to P2P network after completion. 
pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { let uuid = swap.uuid().to_owned(); - let db_id = swap.db_id().to_owned(); + let db_id = swap.db_id().await.to_owned(); let mut attempts = 0; let swap_lock = loop { match SwapLock::lock(&ctx, uuid, 40., db_id.as_deref()).await { @@ -806,7 +806,7 @@ impl TakerSwap { fn wait_refund_until(&self) -> u64 { self.r().data.taker_payment_lock + 3700 } #[inline] - fn db_id(&self) -> Option { self.taker_coin.account_db_id() } + fn db_id(&self) -> Option { self.r().data.db_id.clone() } pub(crate) fn apply_event(&self, event: TakerSwapEvent) { match event { @@ -1129,7 +1129,7 @@ impl TakerSwap { maker_coin_htlc_pubkey: Some(maker_coin_htlc_pubkey.as_slice().into()), taker_coin_htlc_pubkey: Some(taker_coin_htlc_pubkey.as_slice().into()), p2p_privkey: self.p2p_privkey.map(SerializableSecp256k1Keypair::from), - db_id: self.taker_coin.account_db_id(), + db_id: self.taker_coin.account_db_id().await, }; // This will be done during order match @@ -2548,6 +2548,7 @@ pub async fn taker_swap_trade_preimage( .with_sender_pubkey(H256Json::from(our_public_id.bytes)); let _ = order_builder .build() + .await .map_to_mm(|e| TradePreimageRpcError::from_taker_order_build_error(e, &req.base, &req.rel))?; let (base_coin_fee, rel_coin_fee) = match action { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index f19773fe3d..3d6397d43a 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -777,7 +777,7 @@ impl Result { - acquire_reentrancy_lock_impl(&self.ctx, self.uuid, self.taker_coin.account_db_id().as_deref()).await + acquire_reentrancy_lock_impl(&self.ctx, self.uuid, self.taker_coin.account_db_id().await.as_deref()).await } fn spawn_reentrancy_lock_renew(&mut self, guard: Self::ReentrancyLock) { diff --git a/mm2src/mm2_main/src/ordermatch_tests.rs b/mm2src/mm2_main/src/ordermatch_tests.rs index 35bc290692..e28c978600 100644 --- a/mm2src/mm2_main/src/ordermatch_tests.rs +++ b/mm2src/mm2_main/src/ordermatch_tests.rs @@ -340,25 +340,31 @@ fn test_match_maker_order_and_taker_request() { fn maker_order_match_with_request_zero_volumes() { let coin = MmCoinEnum::Test(TestCoin::default()); - let maker_order = MakerOrderBuilder::new(&coin, &coin) - .with_max_base_vol(1.into()) - .with_price(1.into()) - .build_unchecked(); + let maker_order = block_on( + MakerOrderBuilder::new(&coin, &coin) + .with_max_base_vol(1.into()) + .with_price(1.into()) + .build_unchecked(), + ); // default taker order has empty coins and zero amounts so it should pass to the price calculation stage (division) - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_rel_amount(1.into()) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_rel_amount(1.into()) + .build_unchecked(), + ); let expected = OrderMatchResult::NotMatched; let actual = maker_order.match_with_request(&taker_order.request); assert_eq!(expected, actual); // default taker order has empty coins and zero amounts so it should pass to the price calculation stage (division) - let taker_request = TakerOrderBuilder::new(&coin, &coin) - .with_base_amount(1.into()) - .with_action(TakerAction::Sell) - .build_unchecked(); + let taker_request = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_base_amount(1.into()) + .with_action(TakerAction::Sell) + .build_unchecked(), + ); let expected = OrderMatchResult::NotMatched; let actual = 
maker_order.match_with_request(&taker_request.request); @@ -1294,7 +1300,7 @@ fn should_process_request_only_once() { fn test_choose_maker_confs_settings() { let coin = TestCoin::default().into(); // no confs set - let taker_order = TakerOrderBuilder::new(&coin, &coin).build_unchecked(); + let taker_order = block_on(TakerOrderBuilder::new(&coin, &coin).build_unchecked()); TestCoin::requires_notarization.mock_safe(|_| MockResult::Return(true)); TestCoin::required_confirmations.mock_safe(|_| MockResult::Return(8)); let settings = choose_maker_confs_and_notas(None, &taker_order.request, &coin, &coin); @@ -1311,7 +1317,7 @@ fn test_choose_maker_confs_settings() { rel_nota: false, }; // no confs set - let taker_order = TakerOrderBuilder::new(&coin, &coin).build_unchecked(); + let taker_order = block_on(TakerOrderBuilder::new(&coin, &coin).build_unchecked()); let settings = choose_maker_confs_and_notas(Some(maker_conf_settings), &taker_order.request, &coin, &coin); // should pick settings from maker order assert!(!settings.maker_coin_nota); @@ -1331,9 +1337,11 @@ fn test_choose_maker_confs_settings() { rel_confs: 5, rel_nota: false, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let settings = choose_maker_confs_and_notas(Some(maker_conf_settings), &taker_order.request, &coin, &coin); // should pick settings from taker request because taker will wait less time for our // payment confirmation @@ -1354,9 +1362,11 @@ fn test_choose_maker_confs_settings() { rel_confs: 1000, rel_nota: true, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let settings = choose_maker_confs_and_notas(Some(maker_conf_settings), &taker_order.request, &coin, &coin); // keep using our settings allowing taker to wait for our payment conf as much as he likes assert!(!settings.maker_coin_nota); @@ -1377,9 +1387,11 @@ fn test_choose_maker_confs_settings() { base_confs: 1, base_nota: false, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let settings = choose_maker_confs_and_notas(Some(maker_conf_settings), &taker_order.request, &coin, &coin); // Taker conf settings should not have any effect on maker conf requirements for taker payment @@ -1399,10 +1411,12 @@ fn test_choose_maker_confs_settings() { base_confs: 5, base_nota: false, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .with_action(TakerAction::Sell) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .with_action(TakerAction::Sell) + .build_unchecked(), + ); let settings = choose_maker_confs_and_notas(Some(maker_conf_settings), &taker_order.request, &coin, &coin); // should pick settings from taker request because taker will wait less time for our // payment confirmation @@ -1417,7 +1431,7 @@ fn test_choose_taker_confs_settings_buy_action() { let coin = TestCoin::default().into(); // no confs 
and notas set - let taker_order = TakerOrderBuilder::new(&coin, &coin).build_unchecked(); + let taker_order = block_on(TakerOrderBuilder::new(&coin, &coin).build_unchecked()); // no confs and notas set let maker_reserved = MakerReserved::default(); TestCoin::requires_notarization.mock_safe(|_| MockResult::Return(true)); @@ -1435,9 +1449,11 @@ fn test_choose_taker_confs_settings_buy_action() { rel_confs: 4, rel_nota: false, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); // no confs and notas set let maker_reserved = MakerReserved::default(); let settings = choose_taker_confs_and_notas(&taker_order.request, &maker_reserved.conf_settings, &coin, &coin); @@ -1454,9 +1470,11 @@ fn test_choose_taker_confs_settings_buy_action() { rel_confs: 2, rel_nota: true, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let mut maker_reserved = MakerReserved::default(); let maker_conf_settings = OrderConfirmationsSettings { rel_confs: 1, @@ -1479,9 +1497,11 @@ fn test_choose_taker_confs_settings_buy_action() { rel_confs: 1, rel_nota: false, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let mut maker_reserved = MakerReserved::default(); let maker_conf_settings = OrderConfirmationsSettings { rel_confs: 2, @@ -1504,9 +1524,11 @@ fn test_choose_taker_confs_settings_buy_action() { rel_confs: 1, rel_nota: false, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let mut maker_reserved = MakerReserved::default(); let maker_conf_settings = OrderConfirmationsSettings { base_confs: 1, @@ -1529,9 +1551,11 @@ fn test_choose_taker_confs_settings_sell_action() { let coin = TestCoin::default().into(); // no confs and notas set - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_action(TakerAction::Sell) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_action(TakerAction::Sell) + .build_unchecked(), + ); // no confs and notas set let maker_reserved = MakerReserved::default(); TestCoin::requires_notarization.mock_safe(|_| MockResult::Return(true)); @@ -1549,10 +1573,12 @@ fn test_choose_taker_confs_settings_sell_action() { rel_confs: 5, rel_nota: true, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_action(TakerAction::Sell) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_action(TakerAction::Sell) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); // no confs and notas set let maker_reserved = MakerReserved::default(); let settings = choose_taker_confs_and_notas(&taker_order.request, &maker_reserved.conf_settings, &coin, &coin); @@ -1569,10 +1595,12 @@ fn 
test_choose_taker_confs_settings_sell_action() { rel_confs: 2, rel_nota: true, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_action(TakerAction::Sell) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_action(TakerAction::Sell) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let mut maker_reserved = MakerReserved::default(); let maker_conf_settings = OrderConfirmationsSettings { base_confs: 2, @@ -1595,10 +1623,12 @@ fn test_choose_taker_confs_settings_sell_action() { rel_confs: 2, rel_nota: true, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_action(TakerAction::Sell) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_action(TakerAction::Sell) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let mut maker_reserved = MakerReserved::default(); let maker_conf_settings = OrderConfirmationsSettings { rel_confs: 2, @@ -1621,10 +1651,12 @@ fn test_choose_taker_confs_settings_sell_action() { rel_confs: 2, rel_nota: true, }; - let taker_order = TakerOrderBuilder::new(&coin, &coin) - .with_action(TakerAction::Sell) - .with_conf_settings(taker_conf_settings) - .build_unchecked(); + let taker_order = block_on( + TakerOrderBuilder::new(&coin, &coin) + .with_action(TakerAction::Sell) + .with_conf_settings(taker_conf_settings) + .build_unchecked(), + ); let mut maker_reserved = MakerReserved::default(); let maker_conf_settings = OrderConfirmationsSettings { rel_confs: 2, @@ -2208,7 +2240,7 @@ fn test_taker_request_can_match_with_maker_pubkey() { let maker_pubkey = H256Json::default(); // default has MatchBy::Any - let mut order = TakerOrderBuilder::new(&coin, &coin).build_unchecked(); + let mut order = block_on(TakerOrderBuilder::new(&coin, &coin).build_unchecked()); assert!(order.request.can_match_with_maker_pubkey(&maker_pubkey)); // the uuids of orders is checked in another method @@ -2230,7 +2262,7 @@ fn test_taker_request_can_match_with_uuid() { let coin = MmCoinEnum::Test(TestCoin::default()); // default has MatchBy::Any - let mut order = TakerOrderBuilder::new(&coin, &coin).build_unchecked(); + let mut order = block_on(TakerOrderBuilder::new(&coin, &coin).build_unchecked()); assert!(order.request.can_match_with_uuid(&uuid)); // the uuids of orders is checked in another method From 2d427d9976545382687bfbf4c63a19415894062b Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 7 Jun 2024 04:01:07 +0100 Subject: [PATCH 139/186] impl account_db_id for utxo coins --- mm2src/coins/eth.rs | 81 ++++++++++++----------- mm2src/coins/lp_coins.rs | 101 +++++++++++++++-------------- mm2src/coins/qrc20/history.rs | 8 +-- mm2src/coins/utxo/bch.rs | 2 + mm2src/coins/utxo/qtum.rs | 3 + mm2src/coins/utxo/slp.rs | 10 +-- mm2src/coins/utxo/utxo_common.rs | 39 +++++++---- mm2src/coins/utxo/utxo_standard.rs | 2 + 8 files changed, 138 insertions(+), 108 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index ff1303c662..355d41328e 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -141,21 +141,27 @@ mod eth_rpc; #[cfg(any(test, target_arch = "wasm32"))] mod for_tests; pub(crate) mod nft_swap_v2; mod web3_transport; + use web3_transport::{http_transport::HttpTransportNode, Web3Transport}; pub mod eth_hd_wallet; + use eth_hd_wallet::EthHDWallet; #[path = "eth/v2_activation.rs"] pub mod v2_activation; + use 
v2_activation::{build_address_and_priv_key_policy, EthActivationV2Error}; mod eth_withdraw; + use eth_withdraw::{EthWithdraw, InitEthWithdraw, StandardEthWithdraw}; mod nonce; + use nonce::ParityNonce; mod eip1559_gas_fee; + pub(crate) use eip1559_gas_fee::FeePerGasEstimated; use eip1559_gas_fee::{BlocknativeGasApiCaller, FeePerGasSimpleEstimator, GasApiConfig, GasApiProvider, InfuraGasApiCaller}; @@ -898,7 +904,7 @@ pub async fn withdraw_erc1155(ctx: MmArc, withdraw_type: WithdrawErc1155) -> Wit EthCoinType::Erc20 { .. } => { return MmError::err(WithdrawError::InternalError( "Erc20 coin type doesnt support withdraw nft".to_owned(), - )) + )); }, EthCoinType::Nft { .. } => return MmError::err(WithdrawError::NftProtocolNotSupported), }; @@ -987,7 +993,7 @@ pub async fn withdraw_erc721(ctx: MmArc, withdraw_type: WithdrawErc721) -> Withd EthCoinType::Erc20 { .. } => { return MmError::err(WithdrawError::InternalError( "Erc20 coin type doesnt support withdraw nft".to_owned(), - )) + )); }, // TODO: start to use NFT GLOBAL TOKEN for withdraw EthCoinType::Nft { .. } => return MmError::err(WithdrawError::NftProtocolNotSupported), @@ -1047,6 +1053,7 @@ pub async fn withdraw_erc721(ctx: MmArc, withdraw_type: WithdrawErc721) -> Withd #[derive(Clone)] pub struct EthCoin(Arc); + impl Deref for EthCoin { type Target = EthCoinImpl; fn deref(&self) -> &EthCoinImpl { &self.0 } @@ -2045,7 +2052,7 @@ impl WatcherOps for EthCoin { EthCoinType::Nft { .. } => { return MmError::err(WatcherRewardError::InternalError( "Nft Protocol is not supported yet!".to_string(), - )) + )); }, } }, @@ -2307,7 +2314,7 @@ impl MarketCoinOps for EthCoin { Create => { return Box::new(futures01::future::err(TransactionErr::Plain(ERRL!( "Invalid payment action: the payment action cannot be create" - )))) + )))); }, }, }; @@ -2318,7 +2325,7 @@ impl MarketCoinOps for EthCoin { EthCoinType::Nft { .. } => { return Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" - )))) + )))); }, }; @@ -2330,7 +2337,7 @@ impl MarketCoinOps for EthCoin { return Box::new(futures01::future::err(TransactionErr::Plain(ERRL!( "Expected Token::FixedBytes, got {:?}", invalid_token - )))) + )))); }, }; let selfi = self.clone(); @@ -2836,7 +2843,7 @@ impl EthCoin { "blocks_left": saved_traces.earliest_block.as_u64(), })); - let mut existing_history = match self.load_history_from_file(ctx).compat().await { + let mut existing_history = match self.load_history_from_file(ctx).await { Ok(history) => history, Err(e) => { ctx.log.log( @@ -3127,7 +3134,7 @@ impl EthCoin { existing_history.push(details); - if let Err(e) = self.save_history_to_file(ctx, existing_history.clone()).compat().await { + if let Err(e) = self.save_history_to_file(ctx, existing_history.clone()).await { ctx.log.log( "", &[&"tx_history", &self.ticker], @@ -3348,7 +3355,7 @@ impl EthCoin { all_events.sort_by(|a, b| b.block_number.unwrap().cmp(&a.block_number.unwrap())); for event in all_events { - let mut existing_history = match self.load_history_from_file(ctx).compat().await { + let mut existing_history = match self.load_history_from_file(ctx).await { Ok(history) => history, Err(e) => { ctx.log.log( @@ -3508,7 +3515,7 @@ impl EthCoin { existing_history.push(details); - if let Err(e) = self.save_history_to_file(ctx, existing_history).compat().await { + if let Err(e) = self.save_history_to_file(ctx, existing_history).await { ctx.log.log( "", &[&"tx_history", &self.ticker], @@ -3617,7 +3624,7 @@ impl EthCoin { EthCoinType::Nft { .. 
} => { return Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" - )))) + )))); }, } } @@ -3752,21 +3759,21 @@ impl EthCoin { amount, wait_for_required_allowance_until, ) - .map_err(move |e| { - TransactionErr::Plain(ERRL!( + .map_err(move |e| { + TransactionErr::Plain(ERRL!( "Allowed value was not updated in time after sending approve transaction {:02x}: {}", approved.tx_hash_as_bytes(), e )) - }) - .and_then(move |_| { - arc.sign_and_send_transaction( - value, - Call(swap_contract_address), - data, - gas, - ) - }) + }) + .and_then(move |_| { + arc.sign_and_send_transaction( + value, + Call(swap_contract_address), + data, + gas, + ) + }) }), ) } else { @@ -3782,7 +3789,7 @@ impl EthCoin { EthCoinType::Nft { .. } => { return Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" - )))) + )))); }, } } @@ -3801,7 +3808,7 @@ impl EthCoin { Create => { return Box::new(futures01::future::err(TransactionErr::Plain(ERRL!( "Invalid payment action: the payment action cannot be create" - )))) + )))); }, }; @@ -3903,7 +3910,7 @@ impl EthCoin { EthCoinType::Nft { .. } => { return Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" - )))) + )))); }, } } @@ -3922,7 +3929,7 @@ impl EthCoin { Create => { return Box::new(futures01::future::err(TransactionErr::Plain(ERRL!( "Invalid payment action: the payment action cannot be create" - )))) + )))); }, }; @@ -4028,7 +4035,7 @@ impl EthCoin { EthCoinType::Nft { .. } => { return Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" - )))) + )))); }, } } @@ -4153,7 +4160,7 @@ impl EthCoin { EthCoinType::Nft { .. } => { return Err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported!" - ))) + ))); }, } } @@ -4278,7 +4285,7 @@ impl EthCoin { EthCoinType::Nft { .. } => { return Err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" - ))) + ))); }, } } @@ -4397,7 +4404,7 @@ impl EthCoin { EthCoinType::Erc20 { .. } => { return MmError::err(BalanceError::Internal( "Erc20 coin type doesnt support Erc1155 standard".to_owned(), - )) + )); }, }; let wallet_amount = u256_to_big_decimal(wallet_amount_uint, self.decimals)?; @@ -4427,7 +4434,7 @@ impl EthCoin { EthCoinType::Erc20 { .. } => { return MmError::err(GetNftInfoError::Internal( "Erc20 coin type doesnt support Erc721 standard".to_owned(), - )) + )); }, }; Ok(owner_address) @@ -4579,7 +4586,7 @@ impl EthCoin { EthCoinType::Nft { .. } => { return Err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" 
- ))) + ))); }, }; let function = try_tx_s!(ERC20_CONTRACT.function("approve")); @@ -5006,7 +5013,7 @@ impl EthCoin { let transaction = match try_s!(self.transaction(TransactionId::Hash(tx_hash)).await) { Some(t) => t, None => { - return ERR!("Found ReceiverSpent event, but transaction {:02x} is missing", tx_hash) + return ERR!("Found ReceiverSpent event, but transaction {:02x} is missing", tx_hash); }, }; @@ -5031,7 +5038,7 @@ impl EthCoin { let transaction = match try_s!(self.transaction(TransactionId::Hash(tx_hash)).await) { Some(t) => t, None => { - return ERR!("Found SenderRefunded event, but transaction {:02x} is missing", tx_hash) + return ERR!("Found SenderRefunded event, but transaction {:02x} is missing", tx_hash); }, }; @@ -5903,7 +5910,7 @@ fn validate_fee_impl(coin: EthCoin, validate_fee_args: EthValidateFeeArgs<'_>) - return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( "Should have got uint token but got {:?}", value_input - ))) + ))); }, } }, @@ -6575,7 +6582,7 @@ fn get_valid_nft_addr_to_withdraw( _ => { return MmError::err(GetValidEthWithdrawAddError::CoinDoesntSupportNftWithdraw { coin: coin_enum.ticker().to_owned(), - }) + }); }, }; let to_addr = valid_addr_from_str(to).map_err(GetValidEthWithdrawAddError::InvalidAddress)?; @@ -6642,7 +6649,7 @@ async fn get_eth_gas_details_from_withdraw_fee( max_fee_per_gas, max_priority_fee_per_gas, }), - )) + )); }, EthGasLimitOption::Calc => // go to gas estimate code diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 8a7db5937e..b0b2c4dabf 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -48,7 +48,7 @@ use common::custom_futures::timeout::TimeoutError; use common::executor::{abortable_queue::{AbortableQueue, WeakSpawner}, AbortSettings, AbortedError, SpawnAbortable, SpawnFuture}; use common::log::{warn, LogOnError}; -use common::{block_on, calc_total_pages, now_sec, ten, HttpStatusCode}; +use common::{calc_total_pages, now_sec, ten, HttpStatusCode}; use crypto::{derive_secp256k1_secret, Bip32Error, Bip44Chain, CryptoCtx, CryptoCtxError, DerivationPath, GlobalHDAccountArc, HDPathToCoin, HwRpcError, KeyPairPolicy, RpcDerivationPath, Secp256k1ExtendedPublicKey, Secp256k1Secret, WithHwRpcError}; @@ -57,7 +57,7 @@ use enum_derives::{EnumFromStringify, EnumFromTrait}; use ethereum_types::H256; use futures::compat::Future01CompatExt; use futures::lock::Mutex as AsyncMutex; -use futures::{FutureExt, TryFutureExt}; +use futures::TryFutureExt; use futures01::Future; use hex::FromHexError; use http::{Response, StatusCode}; @@ -92,7 +92,8 @@ cfg_native! 
{ use crate::lightning::ln_conf::PlatformCoinConfirmationTargets; use ::lightning::ln::PaymentHash as LightningPayment; use async_std::fs; - use futures::AsyncWriteExt; + use common::block_on; + use futures::{AsyncWriteExt, FutureExt}; use lightning_invoice::{Invoice, ParseOrSemanticError}; use std::io; use std::path::PathBuf; @@ -3239,12 +3240,12 @@ pub trait MmCoin: /// Loads existing tx history from file, returns empty vector if file is not found /// Cleans the existing file if deserialization fails - fn load_history_from_file(&self, ctx: &MmArc) -> TxHistoryFut> { - load_history_from_file_impl(self, ctx) + async fn load_history_from_file(&self, ctx: &MmArc) -> TxHistoryResult> { + load_history_from_file_impl(self, ctx).await } - fn save_history_to_file(&self, ctx: &MmArc, history: Vec) -> TxHistoryFut<()> { - save_history_to_file_impl(self, ctx, history) + async fn save_history_to_file(&self, ctx: &MmArc, history: Vec) -> TxHistoryResult<()> { + save_history_to_file_impl(self, ctx, history).await } #[cfg(not(target_arch = "wasm32"))] @@ -4812,7 +4813,7 @@ pub async fn my_tx_history(ctx: MmArc, req: Json) -> Result>, S Err(err) => return ERR!("!lp_coinfind({}): {}", request.coin, err), }; - let history = try_s!(coin.load_history_from_file(&ctx).compat().await); + let history = try_s!(coin.load_history_from_file(&ctx).await); let total_records = history.len(); let limit = if request.max { total_records } else { request.limit }; @@ -5104,40 +5105,37 @@ pub fn address_by_coin_conf_and_pubkey_str( } #[cfg(target_arch = "wasm32")] -fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut> +async fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryResult> where T: MmCoin + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); - let my_address = try_f!(coin.my_address()); - let db_id = coin.account_db_id(); + let my_address = coin.my_address()?; + let db_id = coin.account_db_id().await; + let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); - let fut = async move { - let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); - let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; - let err = match load_tx_history(&db, &ticker, &my_address).await { - Ok(history) => return Ok(history), - Err(e) => e, - }; + let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; + let err = match load_tx_history(&db, &ticker, &my_address).await { + Ok(history) => return Ok(history), + Err(e) => e, + }; - if let TxHistoryError::ErrorDeserializing(e) = err.get_inner() { - ctx.log.log( - "🌋", - &[&"tx_history", &ticker.to_owned()], - &ERRL!("Error {} on history deserialization, resetting the cache.", e), - ); - clear_tx_history(&db, &ticker, &my_address).await?; - return Ok(Vec::new()); - } + if let TxHistoryError::ErrorDeserializing(e) = err.get_inner() { + ctx.log.log( + "🌋", + &[&"tx_history", &ticker.to_owned()], + &ERRL!("Error {} on history deserialization, resetting the cache.", e), + ); + clear_tx_history(&db, &ticker, &my_address).await?; + return Ok(Vec::new()); + } - Err(err) - }; - Box::new(fut.boxed().compat()) + Err(err) } #[cfg(not(target_arch = "wasm32"))] -fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut> +async fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryResult> where T: MmCoin + ?Sized, { @@ -5145,7 +5143,7 @@ where let history_path = coin.tx_history_path(ctx); let ctx = ctx.clone(); - let fut = async move { + async move { let content = match fs::read(&history_path).await { Ok(content) => content, 
Err(err) if err.kind() == io::ErrorKind::NotFound => { @@ -5174,29 +5172,31 @@ where .await .map_to_mm(|e| TxHistoryError::ErrorClearing(e.to_string()))?; Ok(Vec::new()) - }; - Box::new(fut.boxed().compat()) + } + .await } #[cfg(target_arch = "wasm32")] -fn save_history_to_file_impl(coin: &T, ctx: &MmArc, mut history: Vec) -> TxHistoryFut<()> +async fn save_history_to_file_impl( + coin: &T, + ctx: &MmArc, + mut history: Vec, +) -> TxHistoryResult<()> where T: MmCoin + MarketCoinOps + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); - let my_address = try_f!(coin.my_address()); - let db_id = coin.account_db_id(); + let my_address = coin.my_address()?; history.sort_unstable_by(compare_transaction_details); - let fut = async move { - let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); - let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; - save_tx_history(&db, &ticker, &my_address, history).await?; - Ok(()) - }; - Box::new(fut.boxed().compat()) + let db_id = coin.account_db_id().await; + let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); + let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; + save_tx_history(&db, &ticker, &my_address, history).await?; + + Ok(()) } #[cfg(not(target_arch = "wasm32"))] @@ -5255,7 +5255,11 @@ where } #[cfg(not(target_arch = "wasm32"))] -fn save_history_to_file_impl(coin: &T, ctx: &MmArc, mut history: Vec) -> TxHistoryFut<()> +async fn save_history_to_file_impl( + coin: &T, + ctx: &MmArc, + mut history: Vec, +) -> TxHistoryResult<()> where T: MmCoin + MarketCoinOps + ?Sized, { @@ -5264,7 +5268,7 @@ where history.sort_unstable_by(compare_transaction_details); - let fut = async move { + async move { let content = json::to_vec(&history).map_to_mm(|e| TxHistoryError::ErrorSerializing(e.to_string()))?; let fs_fut = async { @@ -5280,9 +5284,10 @@ where let error = format!("Error '{}' creating/writing/renaming the tmp file {}", e, tmp_file); return MmError::err(TxHistoryError::ErrorSaving(error)); } + Ok(()) - }; - Box::new(fut.boxed().compat()) + } + .await } pub(crate) fn compare_transaction_details(a: &TransactionDetails, b: &TransactionDetails) -> Ordering { diff --git a/mm2src/coins/qrc20/history.rs b/mm2src/coins/qrc20/history.rs index af3c41f078..f4d00d6b9e 100644 --- a/mm2src/coins/qrc20/history.rs +++ b/mm2src/coins/qrc20/history.rs @@ -177,7 +177,7 @@ impl Qrc20Coin { .flat_map(|(_, value)| value) .map(|(_tx_id, tx)| tx.clone()) .collect(); - if let Err(e) = self.save_history_to_file(&ctx, to_write).compat().await { + if let Err(e) = self.save_history_to_file(&ctx, to_write).await { ctx.log.log( "", &[&"tx_history", &self.as_ref().conf.ticker], @@ -357,7 +357,7 @@ impl Qrc20Coin { RequestTxHistoryResult::Retry { error: ERRL!("Error {:?} on blockchain_contract_event_get_history", err), } - } + }; }, JsonRpcErrorType::InvalidRequest(err) | JsonRpcErrorType::Transport(err) @@ -372,7 +372,7 @@ impl Qrc20Coin { UtxoRpcError::InvalidResponse(e) | UtxoRpcError::Internal(e) => { return RequestTxHistoryResult::Retry { error: ERRL!("Error {} on blockchain_contract_event_get_history", e), - } + }; }, }, }; @@ -541,7 +541,7 @@ impl Qrc20Coin { } async fn try_load_history_from_file(&self, ctx: &MmArc) -> TxHistoryResult { - let history = self.load_history_from_file(ctx).compat().await?; + let history = self.load_history_from_file(ctx).await?; let mut history_map: HistoryMapByHash = HashMap::default(); for tx in history { diff --git a/mm2src/coins/utxo/bch.rs b/mm2src/coins/utxo/bch.rs index 2b94135933..1e3985521f 100644 --- 
a/mm2src/coins/utxo/bch.rs +++ b/mm2src/coins/utxo/bch.rs @@ -1358,6 +1358,8 @@ impl MmCoin for BchCoin { tokens.remove(ticker); }; } + + async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } } #[async_trait] diff --git a/mm2src/coins/utxo/qtum.rs b/mm2src/coins/utxo/qtum.rs index 8b8a60d246..c75c9b5e5d 100644 --- a/mm2src/coins/utxo/qtum.rs +++ b/mm2src/coins/utxo/qtum.rs @@ -73,6 +73,7 @@ impl From for WithdrawError { } #[path = "qtum_delegation.rs"] mod qtum_delegation; + #[derive(Debug, Serialize, Deserialize)] #[serde(tag = "format")] pub enum QtumAddressFormat { @@ -978,6 +979,8 @@ impl MmCoin for QtumCoin { fn on_disabled(&self) -> Result<(), AbortedError> { AbortableSystem::abort_all(&self.as_ref().abortable_system) } fn on_token_deactivated(&self, _ticker: &str) {} + + async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } } #[async_trait] diff --git a/mm2src/coins/utxo/slp.rs b/mm2src/coins/utxo/slp.rs index c559449c22..494777bb0b 100644 --- a/mm2src/coins/utxo/slp.rs +++ b/mm2src/coins/utxo/slp.rs @@ -493,7 +493,7 @@ impl SlpToken { _ => { return MmError::err(ValidatePaymentError::WrongPaymentTx( "Invalid Slp tx details".to_string(), - )) + )); }, } @@ -1102,7 +1102,7 @@ impl MarketCoinOps for SlpToken { DerivationMethod::HDWallet(_) => { return MmError::err(MyAddressError::UnexpectedDerivationMethod( "'my_address' is deprecated for HD wallets".to_string(), - )) + )); }, }; let slp_address = self @@ -1655,7 +1655,7 @@ impl MmCoin for SlpToken { CashAddrType::P2SH => { return MmError::err(WithdrawError::InvalidAddress( "Withdrawal to P2SH is not supported".into(), - )) + )); }, }; let slp_output = SlpOutput { amount, script_pubkey }; @@ -1747,7 +1747,7 @@ impl MmCoin for SlpToken { return ValidateAddressResult { is_valid: false, reason: Some(format!("Error {} on parsing the {} as cash address", e, address)), - } + }; }, }; @@ -2227,7 +2227,7 @@ mod slp_tests { discriminant(&tx_err), discriminant(&TransactionErr::TxRecoverable( TransactionEnum::from(utxo_tx), - String::new() + String::new(), )) ); } diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index be54d53fe1..5c13bea458 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -2119,7 +2119,7 @@ pub fn watcher_validate_taker_fee( return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( "Provided dex fee tx {:?} does not have output {}", taker_fee_tx, output_index - ))) + ))); }, } @@ -2207,7 +2207,7 @@ pub fn validate_fee( return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( "Provided dex fee tx {:?} does not have output {}", tx, output_index - ))) + ))); }, } @@ -2234,7 +2234,7 @@ pub fn validate_fee( return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( "Provided burn tx output {:?} does not have output {}", tx, output_index - ))) + ))); }, } } @@ -2308,7 +2308,7 @@ pub fn watcher_validate_taker_payment( None => { return MmError::err(ValidatePaymentError::WrongPaymentTx( "Payment tx has no outputs".to_string(), - )) + )); }, }; @@ -2323,7 +2323,7 @@ pub fn watcher_validate_taker_payment( None => { return MmError::err(ValidatePaymentError::WrongPaymentTx( "Taker payment refund tx has no inputs".to_string(), - )) + )); }, }; @@ -2546,7 +2546,7 @@ pub async fn get_taker_watcher_reward(coin: &T, address: &str) -> ValidateAd return ValidateAddressResult { is_valid: false, reason: Some(e.to_string()), - } + }; }, }; @@ -3124,7 +3124,7 @@ where T: UtxoStandardOps 
+ UtxoCommonOps + MmCoin + MarketCoinOps, { const MIGRATION_NUMBER: u64 = 1; - let history = match coin.load_history_from_file(ctx).compat().await { + let history = match coin.load_history_from_file(ctx).await { Ok(history) => history, Err(e) => { log_tag!( @@ -3155,7 +3155,7 @@ where .collect(); if updated { - if let Err(e) = coin.save_history_to_file(ctx, to_write).compat().await { + if let Err(e) = coin.save_history_to_file(ctx, to_write).await { log_tag!( ctx, "", @@ -3197,7 +3197,7 @@ where migrate_tx_history(&coin, &ctx).await; let mut my_balance: Option = None; - let history = match coin.load_history_from_file(&ctx).compat().await { + let history = match coin.load_history_from_file(&ctx).await { Ok(history) => history, Err(e) => { log_tag!( @@ -3304,7 +3304,7 @@ where if history_map.len() < history_length { let to_write: Vec = history_map.values().cloned().collect(); - if let Err(e) = coin.save_history_to_file(&ctx, to_write).compat().await { + if let Err(e) = coin.save_history_to_file(&ctx, to_write).await { log_tag!( ctx, "", @@ -3390,7 +3390,7 @@ where } if updated { let to_write: Vec = history_map.values().cloned().collect(); - if let Err(e) = coin.save_history_to_file(&ctx, to_write).compat().await { + if let Err(e) = coin.save_history_to_file(&ctx, to_write).await { log_tag!( ctx, "", @@ -3430,7 +3430,7 @@ where return RequestTxHistoryResult::CriticalError(ERRL!( "Error on getting self address: {}. Stop tx history", e - )) + )); }, }; @@ -4308,7 +4308,7 @@ pub async fn validate_payment<'a, T: UtxoCommonOps>( None => { return MmError::err(ValidatePaymentError::WrongPaymentTx( "Payment tx has no outputs".to_string(), - )) + )); }, }; @@ -5127,6 +5127,17 @@ where Ok(transaction) } +pub async fn account_db_id(coin: Coin) -> Option +where + Coin: CoinWithDerivationMethod + HDWalletCoinOps + HDCoinWithdrawOps + UtxoCommonOps, +{ + let derivation = coin.derivation_method(); + derivation + .single_addr() + .await + .map(|addr| hex::encode(addr.hash().to_vec())) +} + #[test] fn test_increase_by_percent() { assert_eq!(increase_by_percent(4300, 1.), 4343); diff --git a/mm2src/coins/utxo/utxo_standard.rs b/mm2src/coins/utxo/utxo_standard.rs index 03c889d790..2052e6586c 100644 --- a/mm2src/coins/utxo/utxo_standard.rs +++ b/mm2src/coins/utxo/utxo_standard.rs @@ -1006,6 +1006,8 @@ impl MmCoin for UtxoStandardCoin { fn on_disabled(&self) -> Result<(), AbortedError> { AbortableSystem::abort_all(&self.as_ref().abortable_system) } fn on_token_deactivated(&self, _ticker: &str) {} + + async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } } #[async_trait] From b51643585c7517b4c8316ab00c2852394720888d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 10 Jun 2024 02:40:08 +0100 Subject: [PATCH 140/186] impl account_db_id for utxo and eth --- mm2src/coins/eth.rs | 12 ++-- mm2src/coins/eth/v2_activation.rs | 38 ++++++++++++ .../utxo/utxo_builder/utxo_arc_builder.rs | 41 +++++++++++++ mm2src/coins/utxo/utxo_common.rs | 12 ++-- mm2src/mm2_core/src/sql_connection_pool.rs | 61 +++++++++++-------- 5 files changed, 127 insertions(+), 37 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index 355d41328e..bbfff14759 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -5734,13 +5734,11 @@ impl MmCoin for EthCoin { async fn account_db_id(&self) -> Option { let derivation_method = &self.deref().derivation_method; - match derivation_method.as_ref() { - DerivationMethod::SingleAddress(single) => return Some(hex::encode(single.as_bytes())), - 
DerivationMethod::HDWallet(hd_wallet) => { - if let Some(addr) = hd_wallet.get_enabled_address().await { - return Some(hex::encode(addr.address.as_bytes())); - } - }, + if let DerivationMethod::HDWallet(hd_wallet) = derivation_method.as_ref() { + if let Some(addr) = hd_wallet.get_enabled_address().await { + let addr = dhash160(addr.address.as_bytes()); + return Some(hex::encode(addr)); + } } None diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index e7b2e62b9a..4c431a792c 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -6,9 +6,14 @@ use crate::nft::nft_errors::{GetNftInfoError, ParseChainTypeError}; use crate::nft::nft_structs::Chain; #[cfg(target_arch = "wasm32")] use crate::EthMetamaskPolicy; use common::executor::AbortedError; +#[cfg(not(target_arch = "wasm32"))] +use crypto::shared_db_id::shared_db_id_from_seed; use crypto::{trezor::TrezorError, Bip32Error, CryptoCtxError, HwError}; use enum_derives::EnumFromTrait; +#[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use instant::Instant; +#[cfg(not(target_arch = "wasm32"))] +use mm2_core::sql_connection_pool::DbIds; use mm2_err_handle::common_errors::WithInternal; #[cfg(target_arch = "wasm32")] use mm2_metamask::{from_metamask_error, MetamaskError, MetamaskRpcError, WithMetamaskRpcError}; @@ -540,6 +545,7 @@ pub async fn eth_coin_from_conf_and_request_v2( ) => { let auth_address = key_pair.address(); let auth_address_str = display_eth_address(&auth_address); + // TODO: send migration request. build_web3_instances(ctx, ticker.to_string(), auth_address_str, key_pair, req.nodes.clone()).await? }, (EthRpcMode::Default, EthPrivKeyPolicy::Trezor) => { @@ -682,6 +688,10 @@ pub(crate) async fn build_address_and_priv_key_policy( enabled_address: *path_to_address, gap_limit, }; + + #[cfg(not(target_arch = "wasm32"))] + run_db_migraiton_for_new_eth_pubkey(ctx, &activated_key).await?; + let derivation_method = DerivationMethod::HDWallet(hd_wallet); Ok(( EthPrivKeyPolicy::HDWallet { @@ -922,3 +932,31 @@ fn compress_public_key(uncompressed: H520) -> MmResult MmResult<(), EthActivationV2Error> { + let db_id = hex::encode(pubkey.address().to_bytes()); + let shared_db_id = shared_db_id_from_seed(&db_id) + .mm_err(|err| EthActivationV2Error::InternalError(err.to_string()))? 
+ .to_string(); + + let db_migration_sender = ctx + .db_migration_watcher + .as_option() + .expect("Db migration watcher isn't intialized yet!") + .get_sender() + .await; + let mut db_migration_sender = db_migration_sender.lock().await; + db_migration_sender + .send(DbIds { + db_id: db_id.clone(), + shared_db_id: shared_db_id.clone(), + }) + .await + .map_to_mm(|err| EthActivationV2Error::InternalError(err.to_string()))?; + + debug!("Public key hash: {db_id}"); + debug!("Shared Database ID: {shared_db_id}"); + + Ok(()) +} diff --git a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs index 60b4d75ff0..deef3bf83d 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs @@ -8,11 +8,18 @@ use crate::utxo::{generate_and_send_tx, FeePolicy, GetUtxoListOps, UtxoArc, Utxo UtxoWeak}; use crate::{DerivationMethod, PrivKeyBuildPolicy, UtxoActivationParams}; use async_trait::async_trait; +#[cfg(not(target_arch = "wasm32"))] use bitcrypto::dhash160; use chain::{BlockHeader, TransactionOutput}; use common::executor::{AbortSettings, SpawnAbortable, Timer}; use common::log::{debug, error, info, warn}; +#[cfg(not(target_arch = "wasm32"))] +use crypto::shared_db_id::shared_db_id_from_seed; use futures::compat::Future01CompatExt; +#[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; +#[cfg(not(target_arch = "wasm32"))] use keys::hash::H160; use mm2_core::mm_ctx::MmArc; +#[cfg(not(target_arch = "wasm32"))] +use mm2_core::sql_connection_pool::DbIds; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; #[cfg(test)] use mocktopus::macros::*; @@ -133,6 +140,12 @@ where } } + #[cfg(not(target_arch = "wasm32"))] + if let PrivKeyBuildPolicy::GlobalHDAccount(hd) = self.priv_key_policy() { + let rmd = dhash160(&hd.root_priv_key().public_key().to_bytes()); + run_db_migraiton_for_new_eth_pubkey(self.ctx, rmd).await? + }; + Ok(result_coin) } } @@ -697,3 +710,31 @@ fn spawn_block_header_utxo_loop( .weak_spawner() .spawn_with_settings(fut, settings); } + +#[cfg(not(target_arch = "wasm32"))] +async fn run_db_migraiton_for_new_eth_pubkey(ctx: &MmArc, pubkey: H160) -> MmResult<(), UtxoCoinBuildError> { + let db_id = hex::encode(pubkey.as_slice()); + let shared_db_id = shared_db_id_from_seed(&db_id) + .mm_err(|err| UtxoCoinBuildError::Internal(err.to_string()))? 
+ .to_string(); + + let db_migration_sender = ctx + .db_migration_watcher + .as_option() + .expect("Db migration watcher isn't intialized yet!") + .get_sender() + .await; + let mut db_migration_sender = db_migration_sender.lock().await; + db_migration_sender + .send(DbIds { + db_id: db_id.clone(), + shared_db_id: shared_db_id.clone(), + }) + .await + .map_to_mm(|err| UtxoCoinBuildError::Internal(err.to_string()))?; + + debug!("Public key hash: {db_id}"); + debug!("Shared Database ID: {shared_db_id}"); + + Ok(()) +} diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index 5c13bea458..c03be79720 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -5131,11 +5131,13 @@ pub async fn account_db_id(coin: Coin) -> Option where Coin: CoinWithDerivationMethod + HDWalletCoinOps + HDCoinWithdrawOps + UtxoCommonOps, { - let derivation = coin.derivation_method(); - derivation - .single_addr() - .await - .map(|addr| hex::encode(addr.hash().to_vec())) + if let DerivationMethod::HDWallet(hd_wallet) = coin.derivation_method() { + if let Some(addr) = hd_wallet.get_enabled_address().await { + return Some(hex::encode(addr.address().hash().to_vec())); + } + } + + None } #[test] diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 0a4b78d9d9..cc1f2f605d 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -1,6 +1,6 @@ use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmCtx}; use async_std::sync::RwLock as AsyncRwLock; -use common::log::error; +use common::log::{error, info}; use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; use futures::channel::mpsc::{channel, Receiver, Sender}; @@ -29,7 +29,7 @@ pub struct SqliteConnPool { rmd160_hex: String, // default shared_db_id shared_db_id: H160, - db_dir: String, + db_root: Option, } impl SqliteConnPool { @@ -68,11 +68,12 @@ impl SqliteConnPool { // Connection pool is not already initialized, create new connection pool. 
let conn = Self::open_connection(sqlite_file_path); let connections = Arc::new(RwLock::new(HashMap::from([(db_id, conn)]))); + let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, rmd160_hex: ctx.rmd160_hex(), shared_db_id: *ctx.shared_db_id(), - db_dir: ctx.conf["dbdir"].to_string() + db_root: db_root.map(|d| d.to_owned()) })); Ok(()) @@ -102,11 +103,12 @@ impl SqliteConnPool { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let connections = Arc::new(RwLock::new(HashMap::from([(db_id, connection)]))); + let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, rmd160_hex: ctx.rmd160_hex(), shared_db_id: *ctx.shared_db_id(), - db_dir: ctx.conf["dbdir"].to_string() + db_root: db_root.map(|d| d.to_owned()) })); Ok(()) @@ -137,10 +139,10 @@ impl SqliteConnPool { drop(connections); let mut connections = self.connections.write().unwrap(); - let sqlite_file_path = match db_id_conn_kind { - DbIdConnKind::Shared => self.db_dir(&db_id).join(SQLITE_SHARED_DB_ID), - DbIdConnKind::Single => self.db_dir(&db_id).join(SYNC_SQLITE_DB_ID), - }; + let sqlite_file_path = self.db_dir(&db_id).join(match db_id_conn_kind { + DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, + DbIdConnKind::Single => SYNC_SQLITE_DB_ID, + }); let connection = Self::open_connection(sqlite_file_path); connections.insert(db_id, Arc::clone(&connection)); @@ -149,27 +151,27 @@ impl SqliteConnPool { /// Retrieves a single-user connection from the pool. pub fn run_sql_query(&self, db_id: Option<&str>, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, + where + F: FnOnce(MutexGuard) -> R + Send + 'static, + R: Send + 'static, { self.run_sql_query_impl(db_id, DbIdConnKind::Single, f) } /// Retrieves a shared connection from the pool. pub fn run_sql_query_shared(&self, db_id: Option<&str>, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, + where + F: FnOnce(MutexGuard) -> R + Send + 'static, + R: Send + 'static, { self.run_sql_query_impl(db_id, DbIdConnKind::Shared, f) } - /// Internal implementation to retrieve or create a connection. + /// Internal run a sql query. fn run_sql_query_impl(&self, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, + where + F: FnOnce(MutexGuard) -> R + Send + 'static, + R: Send + 'static, { let db_id_default = match db_id_conn_kind { DbIdConnKind::Shared => hex::encode(self.shared_db_id.as_slice()), @@ -182,19 +184,21 @@ impl SqliteConnPool { let conn = connection.lock().unwrap(); return f(conn); } + drop(connections); + let sqlite_file_path = self.db_dir(&db_id).join(match db_id_conn_kind { + DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, + DbIdConnKind::Single => SYNC_SQLITE_DB_ID, + }); let mut connections = self.connections.write().unwrap(); - let sqlite_file_path = match db_id_conn_kind { - DbIdConnKind::Shared => self.db_dir(&db_id).join(SQLITE_SHARED_DB_ID), - DbIdConnKind::Single => self.db_dir(&db_id).join(SYNC_SQLITE_DB_ID), - }; let connection = Self::open_connection(sqlite_file_path); connections.insert(db_id, Arc::clone(&connection)); - let conn = connection.lock().unwrap(); - f(conn) + + f(connection.try_lock().unwrap()) } /// Opens a database connection based on the database ID and connection kind. 
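The connection-pool changes above amount to a per-account cache: connections are keyed by db_id and lazily opened under db_root/<db_id>/ via path_to_dbdir. A rough sketch of that get-or-open pattern with rusqlite follows; the struct name and the "MM2.db" file name are assumptions for illustration, not the pool's real API.

use rusqlite::Connection;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, Mutex, RwLock};

struct ConnCache {
    db_root: PathBuf,
    conns: RwLock<HashMap<String, Arc<Mutex<Connection>>>>,
}

impl ConnCache {
    // Return the cached connection for `db_id`, or open "<db_root>/<db_id>/MM2.db"
    // and cache it. If two callers race between dropping the read lock and taking
    // the write lock, entry().or_insert keeps whichever connection landed first.
    fn get_or_open(&self, db_id: &str) -> Arc<Mutex<Connection>> {
        if let Some(conn) = self.conns.read().unwrap().get(db_id) {
            return Arc::clone(conn);
        }
        let path = self.db_root.join(db_id).join("MM2.db");
        let conn = Arc::new(Mutex::new(Connection::open(path).expect("open sqlite")));
        Arc::clone(self.conns.write().unwrap().entry(db_id.to_owned()).or_insert(conn))
    }
}
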
+ #[cfg(not(test))] fn open_connection(sqlite_file_path: PathBuf) -> Arc> { log_sqlite_file_open_attempt(&sqlite_file_path); Arc::new(Mutex::new( @@ -202,7 +206,13 @@ impl SqliteConnPool { )) } - fn db_dir(&self, db_id: &str) -> PathBuf { path_to_dbdir(Some(&self.db_dir), db_id) } + /// Opens a database connection based on the database ID and connection kind. + #[cfg(test)] + fn open_connection(_sqlite_file_path: PathBuf) -> Arc> { + Arc::new(Mutex::new(Connection::open_in_memory().unwrap())) + } + + fn db_dir(&self, db_id: &str) -> PathBuf { path_to_dbdir(self.db_root.as_deref(), db_id) } } /// A pool for managing async SQLite connections, where each connection is keyed by a unique string identifier. @@ -303,6 +313,7 @@ pub struct DbIds { pub db_id: String, pub shared_db_id: String, } + pub type DbMigrationHandler = Arc>>; pub type DbMigrationSender = Arc>>; From 497a0064ada35b1d7d3b99172441c3b8f80dea31 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 10 Jun 2024 02:57:28 +0100 Subject: [PATCH 141/186] fix clippy --- mm2src/mm2_core/src/sql_connection_pool.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index cc1f2f605d..8a18349fc2 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -1,6 +1,6 @@ use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmCtx}; use async_std::sync::RwLock as AsyncRwLock; -use common::log::{error, info}; +use common::log::error; use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; use futures::channel::mpsc::{channel, Receiver, Sender}; @@ -151,27 +151,27 @@ impl SqliteConnPool { /// Retrieves a single-user connection from the pool. pub fn run_sql_query(&self, db_id: Option<&str>, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, + where + F: FnOnce(MutexGuard) -> R + Send + 'static, + R: Send + 'static, { self.run_sql_query_impl(db_id, DbIdConnKind::Single, f) } /// Retrieves a shared connection from the pool. pub fn run_sql_query_shared(&self, db_id: Option<&str>, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, + where + F: FnOnce(MutexGuard) -> R + Send + 'static, + R: Send + 'static, { self.run_sql_query_impl(db_id, DbIdConnKind::Shared, f) } /// Internal run a sql query. 
fn run_sql_query_impl(&self, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, + where + F: FnOnce(MutexGuard) -> R + Send + 'static, + R: Send + 'static, { let db_id_default = match db_id_conn_kind { DbIdConnKind::Shared => hex::encode(self.shared_db_id.as_slice()), From df102643bbe485cbaa422f1873af0a2ff127c9be Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 10 Jun 2024 16:09:28 +0100 Subject: [PATCH 142/186] fix native unit tests and other fixes --- mm2src/coins/eth.rs | 10 +- mm2src/coins/lp_coins.rs | 50 ++- mm2src/coins/qrc20/history.rs | 4 +- mm2src/coins/utxo/utxo_common.rs | 24 +- mm2src/coins/utxo/utxo_common_tests.rs | 13 +- mm2src/mm2_core/src/sql_connection_pool.rs | 5 + .../tests/mm2_tests/mm2_tests_inner.rs | 354 +++++++++--------- mm2src/mm2_test_helpers/src/for_tests.rs | 5 +- 8 files changed, 256 insertions(+), 209 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index bbfff14759..b458db4164 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -2843,7 +2843,10 @@ impl EthCoin { "blocks_left": saved_traces.earliest_block.as_u64(), })); - let mut existing_history = match self.load_history_from_file(ctx).await { + let mut existing_history = match self + .load_history_from_file(ctx, self.account_db_id().await.as_deref()) + .await + { Ok(history) => history, Err(e) => { ctx.log.log( @@ -3355,7 +3358,10 @@ impl EthCoin { all_events.sort_by(|a, b| b.block_number.unwrap().cmp(&a.block_number.unwrap())); for event in all_events { - let mut existing_history = match self.load_history_from_file(ctx).await { + let mut existing_history = match self + .load_history_from_file(ctx, self.account_db_id().await.as_deref()) + .await + { Ok(history) => history, Err(e) => { ctx.log.log( diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index b0b2c4dabf..defe998a6a 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -92,7 +92,6 @@ cfg_native! { use crate::lightning::ln_conf::PlatformCoinConfirmationTargets; use ::lightning::ln::PaymentHash as LightningPayment; use async_std::fs; - use common::block_on; use futures::{AsyncWriteExt, FutureExt}; use lightning_invoice::{Invoice, ParseOrSemanticError}; use std::io; @@ -3214,34 +3213,36 @@ pub trait MmCoin: /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] - fn tx_history_path(&self, ctx: &MmArc) -> PathBuf { + fn tx_history_path(&self, ctx: &MmArc, db_id: Option<&str>) -> PathBuf { let my_address = self.my_address().unwrap_or_default(); // BCH cash address format has colon after prefix, e.g. bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - let db_id = block_on(self.account_db_id()); - ctx.dbdir(db_id.as_deref()) + ctx.dbdir(db_id) .join("TRANSACTIONS") .join(format!("{}_{}.json", self.ticker(), my_address)) } /// Path to tx history migration file #[cfg(not(target_arch = "wasm32"))] - fn tx_migration_path(&self, ctx: &MmArc) -> PathBuf { + fn tx_migration_path(&self, ctx: &MmArc, db_id: Option<&str>) -> PathBuf { let my_address = self.my_address().unwrap_or_default(); // BCH cash address format has colon after prefix, e.g. 
bitcoincash: // Colon can't be used in file names on Windows so it should be escaped let my_address = my_address.replace(':', "_"); - let db_id = block_on(self.account_db_id()); - ctx.dbdir(db_id.as_deref()) + ctx.dbdir(db_id) .join("TRANSACTIONS") .join(format!("{}_{}_migration", self.ticker(), my_address)) } /// Loads existing tx history from file, returns empty vector if file is not found /// Cleans the existing file if deserialization fails - async fn load_history_from_file(&self, ctx: &MmArc) -> TxHistoryResult> { - load_history_from_file_impl(self, ctx).await + async fn load_history_from_file( + &self, + ctx: &MmArc, + db_id: Option<&str>, + ) -> TxHistoryResult> { + load_history_from_file_impl(self, ctx, db_id).await } async fn save_history_to_file(&self, ctx: &MmArc, history: Vec) -> TxHistoryResult<()> { @@ -3249,11 +3250,13 @@ pub trait MmCoin: } #[cfg(not(target_arch = "wasm32"))] - fn get_tx_history_migration(&self, ctx: &MmArc) -> TxHistoryFut { get_tx_history_migration_impl(self, ctx) } + fn get_tx_history_migration(&self, ctx: &MmArc, db_id: Option<&str>) -> TxHistoryFut { + get_tx_history_migration_impl(self, ctx, db_id) + } #[cfg(not(target_arch = "wasm32"))] - fn update_migration_file(&self, ctx: &MmArc, migration_number: u64) -> TxHistoryFut<()> { - update_migration_file_impl(self, ctx, migration_number) + fn update_migration_file(&self, ctx: &MmArc, migration_number: u64, db_id: Option<&str>) -> TxHistoryFut<()> { + update_migration_file_impl(self, ctx, migration_number, db_id) } /// Transaction history background sync status @@ -4813,7 +4816,10 @@ pub async fn my_tx_history(ctx: MmArc, req: Json) -> Result>, S Err(err) => return ERR!("!lp_coinfind({}): {}", request.coin, err), }; - let history = try_s!(coin.load_history_from_file(&ctx).await); + let history = try_s!( + coin.load_history_from_file(&ctx, coin.account_db_id().await.as_deref()) + .await + ); let total_records = history.len(); let limit = if request.max { total_records } else { request.limit }; @@ -5135,12 +5141,16 @@ where } #[cfg(not(target_arch = "wasm32"))] -async fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryResult> +async fn load_history_from_file_impl( + coin: &T, + ctx: &MmArc, + db_id: Option<&str>, +) -> TxHistoryResult> where T: MmCoin + ?Sized, { let ticker = coin.ticker().to_owned(); - let history_path = coin.tx_history_path(ctx); + let history_path = coin.tx_history_path(ctx, db_id); let ctx = ctx.clone(); async move { @@ -5200,11 +5210,11 @@ where } #[cfg(not(target_arch = "wasm32"))] -fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc) -> TxHistoryFut +fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc, db_id: Option<&str>) -> TxHistoryFut where T: MmCoin + MarketCoinOps + ?Sized, { - let migration_path = coin.tx_migration_path(ctx); + let migration_path = coin.tx_migration_path(ctx, db_id); let fut = async move { let current_migration = match fs::read(&migration_path).await { @@ -5227,11 +5237,11 @@ where } #[cfg(not(target_arch = "wasm32"))] -fn update_migration_file_impl(coin: &T, ctx: &MmArc, migration_number: u64) -> TxHistoryFut<()> +fn update_migration_file_impl(coin: &T, ctx: &MmArc, migration_number: u64, db_id: Option<&str>) -> TxHistoryFut<()> where T: MmCoin + MarketCoinOps + ?Sized, { - let migration_path = coin.tx_migration_path(ctx); + let migration_path = coin.tx_migration_path(ctx, db_id); let tmp_file = format!("{}.tmp", migration_path.display()); let fut = async move { @@ -5263,7 +5273,7 @@ async fn save_history_to_file_impl( where T: MmCoin + 
MarketCoinOps + ?Sized, { - let history_path = coin.tx_history_path(ctx); + let history_path = coin.tx_history_path(ctx, coin.account_db_id().await.as_deref()); let tmp_file = format!("{}.tmp", history_path.display()); history.sort_unstable_by(compare_transaction_details); diff --git a/mm2src/coins/qrc20/history.rs b/mm2src/coins/qrc20/history.rs index f4d00d6b9e..652610aa44 100644 --- a/mm2src/coins/qrc20/history.rs +++ b/mm2src/coins/qrc20/history.rs @@ -541,7 +541,9 @@ impl Qrc20Coin { } async fn try_load_history_from_file(&self, ctx: &MmArc) -> TxHistoryResult { - let history = self.load_history_from_file(ctx).await?; + let history = self + .load_history_from_file(ctx, self.account_db_id().await.as_deref()) + .await?; let mut history_map: HistoryMapByHash = HashMap::default(); for tx in history { diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index c03be79720..42d0857ea1 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -3124,7 +3124,10 @@ where T: UtxoStandardOps + UtxoCommonOps + MmCoin + MarketCoinOps, { const MIGRATION_NUMBER: u64 = 1; - let history = match coin.load_history_from_file(ctx).await { + let history = match coin + .load_history_from_file(ctx, coin.account_db_id().await.as_deref()) + .await + { Ok(history) => history, Err(e) => { log_tag!( @@ -3166,7 +3169,11 @@ where return; }; } - if let Err(e) = coin.update_migration_file(ctx, MIGRATION_NUMBER).compat().await { + if let Err(e) = coin + .update_migration_file(ctx, MIGRATION_NUMBER, coin.account_db_id().await.as_deref()) + .compat() + .await + { log_tag!( ctx, "", @@ -3182,7 +3189,11 @@ async fn migrate_tx_history(coin: &T, ctx: &MmArc) where T: UtxoStandardOps + UtxoCommonOps + MmCoin + MarketCoinOps, { - let current_migration = coin.get_tx_history_migration(ctx).compat().await.unwrap_or(0); + let current_migration = coin + .get_tx_history_migration(ctx, coin.account_db_id().await.as_deref()) + .compat() + .await + .unwrap_or(0); if current_migration < 1 { tx_history_migration_1(coin, ctx).await; } @@ -3197,7 +3208,10 @@ where migrate_tx_history(&coin, &ctx).await; let mut my_balance: Option = None; - let history = match coin.load_history_from_file(&ctx).await { + let history = match coin + .load_history_from_file(&ctx, coin.account_db_id().await.as_deref()) + .await + { Ok(history) => history, Err(e) => { log_tag!( @@ -5133,7 +5147,7 @@ where { if let DerivationMethod::HDWallet(hd_wallet) = coin.derivation_method() { if let Some(addr) = hd_wallet.get_enabled_address().await { - return Some(hex::encode(addr.address().hash().to_vec())); + return Some(hex::encode(addr.pubkey().address_hash().as_slice())); } } diff --git a/mm2src/coins/utxo/utxo_common_tests.rs b/mm2src/coins/utxo/utxo_common_tests.rs index c558803425..7b06b88d15 100644 --- a/mm2src/coins/utxo/utxo_common_tests.rs +++ b/mm2src/coins/utxo/utxo_common_tests.rs @@ -285,8 +285,15 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { }); let coin = utxo_coin_from_fields(fields); + #[cfg(not(target_arch = "wasm32"))] + { + let dbs = ctx.sqlite_conn_pool.as_option().unwrap(); + dbs.add_test_db("b591d089ee36906f96172761c78556f2f75953aa".to_string()); + } let current_balances = coin.my_addresses_balances().await.unwrap(); - let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, Some("b591d089ee36906f96172761c78556f2f75953aa".to_string())) + .build() + .unwrap(); spawn(utxo_history_loop( 
coin.clone(), storage, @@ -311,7 +318,9 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { _ => unimplemented!(), } - let storage = TxHistoryStorageBuilder::new(&ctx, None).build().unwrap(); + let storage = TxHistoryStorageBuilder::new(&ctx, coin.account_db_id().await) + .build() + .unwrap(); spawn(utxo_history_loop( coin.clone(), storage, diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 8a18349fc2..3091b9d6b1 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -197,6 +197,11 @@ impl SqliteConnPool { f(connection.try_lock().unwrap()) } + pub fn add_test_db(&self, db_id: String) { + let mut connections = self.connections.write().unwrap(); + connections.insert(db_id, Arc::new(Mutex::new(Connection::open_in_memory().unwrap()))); + } + /// Opens a database connection based on the database ID and connection kind. #[cfg(not(test))] fn open_connection(sqlite_file_path: PathBuf) -> Arc> { diff --git a/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs b/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs index ef80bc6542..2015abc7e5 100644 --- a/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs +++ b/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs @@ -53,7 +53,7 @@ cfg_wasm32! { fn test_rpc() { let (_, mm, _dump_log, _dump_dashboard) = mm_spat(); - let no_method = block_on(mm.rpc(&json! ({ + let no_method = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "coin": "RICK", "ipaddr": "electrum1.cipig.net", @@ -67,7 +67,7 @@ fn test_rpc() { assert!(not_json.0.is_server_error()); assert_eq!((not_json.2)[ACCESS_CONTROL_ALLOW_ORIGIN], "http://localhost:4000"); - let unknown_method = block_on(mm.rpc(&json! ({ + let unknown_method = block_on(mm.rpc(&json!({ "method": "unknown_method", }))) .unwrap(); @@ -75,7 +75,7 @@ fn test_rpc() { assert!(unknown_method.0.is_server_error()); assert_eq!((unknown_method.2)[ACCESS_CONTROL_ALLOW_ORIGIN], "http://localhost:4000"); - let version = block_on(mm.rpc(&json! ({ + let version = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "version", }))) @@ -84,7 +84,7 @@ fn test_rpc() { assert_eq!((version.2)[ACCESS_CONTROL_ALLOW_ORIGIN], "http://localhost:4000"); let _version: MmVersionResponse = json::from_str(&version.1).unwrap(); - let help = block_on(mm.rpc(&json! ({ + let help = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "help", }))) @@ -108,7 +108,7 @@ fn orders_of_banned_pubkeys_should_not_be_displayed() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -132,7 +132,7 @@ fn orders_of_banned_pubkeys_should_not_be_displayed() { ); // issue sell request on Bob side by setting base/rel price log!("Issue bob sell request"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -144,7 +144,7 @@ fn orders_of_banned_pubkeys_should_not_be_displayed() { assert!(rc.0.is_success(), "!setprice: {}", rc.1); let mut mm_alice = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("ALICE_TRADE_IP") .ok(), @@ -163,7 +163,7 @@ fn orders_of_banned_pubkeys_should_not_be_displayed() { log!("Alice log path: {}", mm_alice.log_path.display()); log!("Ban Bob pubkey on Alice side"); - let rc = block_on(mm_alice.rpc(&json! 
({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "ban_pubkey", "pubkey": "2cd3021a2197361fb70b862c412bc8e44cff6951fa1de45ceabfdd9b4c520420", @@ -173,7 +173,7 @@ fn orders_of_banned_pubkeys_should_not_be_displayed() { assert!(rc.0.is_success(), "!ban_pubkey: {}", rc.1); log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -213,7 +213,7 @@ fn test_my_balance() { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -233,7 +233,7 @@ fn test_my_balance() { let json = block_on(enable_electrum(&mm, "RICK", false, DOC_ELECTRUM_ADDRS)); assert_eq!(json.balance, "7.777".parse().unwrap()); - let my_balance = block_on(mm.rpc(&json! ({ + let my_balance = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "my_balance", "coin": "RICK", @@ -297,7 +297,7 @@ fn test_p2wpkh_my_balance() { #[cfg(not(target_arch = "wasm32"))] fn check_set_price_fails(mm: &MarketMakerIt, base: &str, rel: &str) { - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "setprice", "base": base, @@ -315,7 +315,7 @@ fn check_set_price_fails(mm: &MarketMakerIt, base: &str, rel: &str) { #[cfg(not(target_arch = "wasm32"))] fn check_buy_fails(mm: &MarketMakerIt, base: &str, rel: &str, vol: f64) { - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "buy", "base": base, @@ -329,7 +329,7 @@ fn check_buy_fails(mm: &MarketMakerIt, base: &str, rel: &str, vol: f64) { #[cfg(not(target_arch = "wasm32"))] fn check_sell_fails(mm: &MarketMakerIt, base: &str, rel: &str, vol: f64) { - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "sell", "base": base, @@ -353,7 +353,7 @@ fn test_check_balance_on_order_post() { // start bob and immediately place the order let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -415,7 +415,7 @@ fn test_rpc_password_from_json() { // do not allow empty password let mut err_mm1 = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "passphrase": "bob passphrase", @@ -432,7 +432,7 @@ fn test_rpc_password_from_json() { // do not allow empty password let mut err_mm2 = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "passphrase": "bob passphrase", @@ -448,7 +448,7 @@ fn test_rpc_password_from_json() { block_on(err_mm2.wait_for_log(5., |log| log.contains("rpc_password must be string"))).unwrap(); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "passphrase": "bob passphrase", @@ -462,7 +462,7 @@ fn test_rpc_password_from_json() { .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); log!("Log path: {}", mm.log_path.display()); - let electrum_invalid = block_on(mm.rpc(&json! ({ + let electrum_invalid = block_on(mm.rpc(&json!({ "userpass": "password1", "method": "electrum", "coin": "RICK", @@ -497,7 +497,7 @@ fn test_rpc_password_from_json() { electrum.1 ); - let electrum = block_on(mm.rpc(&json! ({ + let electrum = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "electrum", "coin": "MORTY", @@ -515,7 +515,7 @@ fn test_rpc_password_from_json() { electrum.1 ); - let orderbook = block_on(mm.rpc(&json! 
({ + let orderbook = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "orderbook", "base": "RICK", @@ -542,7 +542,7 @@ fn test_mmrpc_v2() { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "passphrase": "bob passphrase", @@ -560,7 +560,7 @@ fn test_mmrpc_v2() { let _electrum = block_on(enable_electrum(&mm, "RICK", false, DOC_ELECTRUM_ADDRS)); // no `userpass` - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "method": "withdraw", "params": { @@ -580,7 +580,7 @@ fn test_mmrpc_v2() { assert!(withdraw_error.error_data.is_none()); // invalid `userpass` - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": "another password", "method": "withdraw", @@ -601,7 +601,7 @@ fn test_mmrpc_v2() { assert!(withdraw_error.error_data.is_some()); // invalid `mmrpc` version - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "mmrpc": "1.0", "userpass": mm.userpass, "method": "withdraw", @@ -622,7 +622,7 @@ fn test_mmrpc_v2() { assert_eq!(withdraw_error.error_type, "InvalidMmRpcVersion"); // 'id' = 3 - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "withdraw", @@ -648,7 +648,7 @@ fn test_rpc_password_from_json_no_userpass() { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "passphrase": "bob passphrase", @@ -661,7 +661,7 @@ fn test_rpc_password_from_json_no_userpass() { .unwrap(); let (_dump_log, _dump_dashboard) = mm.mm_dump(); log!("Log path: {}", mm.log_path.display()); - let electrum = block_on(mm.rpc(&json! ({ + let electrum = block_on(mm.rpc(&json!({ "method": "electrum", "coin": "RICK", "urls": ["electrum2.cipig.net:10017"], @@ -807,7 +807,7 @@ async fn trade_base_rel_electrum( for (base, rel) in pairs.iter() { log!("Get {}/{} orderbook", base, rel); let rc = mm_bob - .rpc(&json! ({ + .rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": base, @@ -873,7 +873,7 @@ fn withdraw_and_send( use std::ops::Sub; let from = from.map(WithdrawFrom::AddressId); - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "withdraw", @@ -910,7 +910,7 @@ fn withdraw_and_send( assert_eq!(tx_details.from, vec![from_str]); } - let send = block_on(mm.rpc(&json! ({ + let send = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "send_raw_transaction", "coin": coin, @@ -927,14 +927,14 @@ fn withdraw_and_send( fn test_withdraw_and_send() { let alice_passphrase = get_passphrase!(".env.client", "ALICE_PASSPHRASE").unwrap(); - let coins = json! ([ + let coins = json!([ {"coin":"RICK","asset":"RICK","rpcport":8923,"txversion":4,"overwintered":1,"txfee":1000,"protocol":{"type":"UTXO"}}, {"coin":"MORTY","asset":"MORTY","rpcport":8923,"txversion":4,"overwintered":1,"txfee":1000,"protocol":{"type":"UTXO"}}, {"coin":"MORTY_SEGWIT","asset":"MORTY_SEGWIT","txversion":4,"overwintered":1,"segwit":true,"txfee":1000,"protocol":{"type":"UTXO"}}, ]); let mm_alice = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8100, "myipaddr": env::var ("ALICE_TRADE_IP") .ok(), @@ -971,7 +971,7 @@ fn test_withdraw_and_send() { ); // allow to withdraw non-Segwit coin to P2SH addresses - let withdraw = block_on(mm_alice.rpc(&json! 
({ + let withdraw = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "mmrpc": "2.0", "method": "withdraw", @@ -986,7 +986,7 @@ fn test_withdraw_and_send() { assert!(withdraw.0.is_success(), "MORTY withdraw: {}", withdraw.1); // allow to withdraw to P2SH addresses if Segwit flag is true - let withdraw = block_on(mm_alice.rpc(&json! ({ + let withdraw = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "mmrpc": "2.0", "method": "withdraw", @@ -1003,7 +1003,7 @@ fn test_withdraw_and_send() { // must not allow to withdraw too small amount 0.000005 (less than 0.00001 dust) let small_amount = MmNumber::from("0.000005").to_decimal(); - let withdraw = block_on(mm_alice.rpc(&json! ({ + let withdraw = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "mmrpc": "2.0", "method": "withdraw", @@ -1250,10 +1250,10 @@ fn test_withdraw_segwit() { #[test] #[cfg(not(target_arch = "wasm32"))] fn test_swap_status() { - let coins = json! ([{"coin":"RICK","asset":"RICK"},]); + let coins = json!([{"coin":"RICK","asset":"RICK"},]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8100, "myipaddr": env::var ("ALICE_TRADE_IP") .ok(), @@ -1268,7 +1268,7 @@ fn test_swap_status() { ) .unwrap(); - let my_swap = block_on(mm.rpc(&json! ({ + let my_swap = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "my_swap_status", "params": { @@ -1279,7 +1279,7 @@ fn test_swap_status() { assert!(my_swap.0.is_server_error(), "!not found status code: {}", my_swap.1); - let stats_swap = block_on(mm.rpc(&json! ({ + let stats_swap = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "stats_swap_status", "params": { @@ -1305,7 +1305,7 @@ fn test_order_errors_when_base_equal_rel() { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -1324,7 +1324,7 @@ fn test_order_errors_when_base_equal_rel() { log!("Log path: {}", mm.log_path.display()); block_on(enable_electrum(&mm, "RICK", false, DOC_ELECTRUM_ADDRS)); - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "setprice", "base": "RICK", @@ -1334,7 +1334,7 @@ fn test_order_errors_when_base_equal_rel() { .unwrap(); assert!(rc.0.is_server_error(), "setprice should have failed, but got {:?}", rc); - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "buy", "base": "RICK", @@ -1345,7 +1345,7 @@ fn test_order_errors_when_base_equal_rel() { .unwrap(); assert!(rc.0.is_server_error(), "buy should have failed, but got {:?}", rc); - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "sell", "base": "RICK", @@ -1364,7 +1364,7 @@ fn startup_passphrase(passphrase: &str, expected_address: &str) { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -1430,7 +1430,7 @@ fn test_cancel_order() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -1456,7 +1456,7 @@ fn test_cancel_order() { ); log!("Issue sell request on Bob side by setting base/rel price…"); - let rc = block_on(mm_bob.rpc(&json! 
({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -1470,7 +1470,7 @@ fn test_cancel_order() { log!("{:?}", setprice_json); let mm_alice = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -1497,7 +1497,7 @@ fn test_cancel_order() { ); log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -1514,7 +1514,7 @@ fn test_cancel_order() { "Alice RICK/MORTY orderbook must have exactly 1 ask" ); - let cancel_rc = block_on(mm_bob.rpc(&json! ({ + let cancel_rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "cancel_order", "uuid": setprice_json["result"]["uuid"], @@ -1535,7 +1535,7 @@ fn test_cancel_order() { // Bob orderbook must show no orders log!("Get RICK/MORTY orderbook on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": "RICK", @@ -1550,7 +1550,7 @@ fn test_cancel_order() { // Alice orderbook must show no orders log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -1575,7 +1575,7 @@ fn test_cancel_all_orders() { let bob_passphrase = "bob passphrase"; // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -1600,7 +1600,7 @@ fn test_cancel_all_orders() { ); log!("Issue sell request on Bob side by setting base/rel price…"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -1614,7 +1614,7 @@ fn test_cancel_all_orders() { log!("{:?}", setprice_json); let mm_alice = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -1643,7 +1643,7 @@ fn test_cancel_all_orders() { thread::sleep(Duration::from_secs(3)); log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -1657,7 +1657,7 @@ fn test_cancel_all_orders() { let asks = alice_orderbook["asks"].as_array().unwrap(); assert_eq!(asks.len(), 1, "Alice RICK/MORTY orderbook must have exactly 1 ask"); - let cancel_rc = block_on(mm_bob.rpc(&json! ({ + let cancel_rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "cancel_all_orders", "cancel_by": { @@ -1680,7 +1680,7 @@ fn test_cancel_all_orders() { // Bob orderbook must show no orders log!("Get RICK/MORTY orderbook on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": "RICK", @@ -1696,7 +1696,7 @@ fn test_cancel_all_orders() { // Alice orderbook must show no orders log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -1723,7 +1723,7 @@ fn test_electrum_enable_conn_errors() { ]); let mm_bob = MarketMakerIt::start( - json! 
({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -1769,7 +1769,7 @@ fn test_order_should_not_be_displayed_when_node_is_down() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -1795,7 +1795,7 @@ fn test_order_should_not_be_displayed_when_node_is_down() { log!("Bob enable MORTY {:?}", electrum_morty); let mm_alice = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("ALICE_TRADE_IP") .ok(), @@ -1822,7 +1822,7 @@ fn test_order_should_not_be_displayed_when_node_is_down() { // issue sell request on Bob side by setting base/rel price log!("Issue bob sell request"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -1836,7 +1836,7 @@ fn test_order_should_not_be_displayed_when_node_is_down() { thread::sleep(Duration::from_secs(2)); log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -1853,7 +1853,7 @@ fn test_order_should_not_be_displayed_when_node_is_down() { block_on(mm_bob.stop()).unwrap(); thread::sleep(Duration::from_secs(6)); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -1880,7 +1880,7 @@ fn test_own_orders_should_not_be_removed_from_orderbook() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -1908,7 +1908,7 @@ fn test_own_orders_should_not_be_removed_from_orderbook() { // issue sell request on Bob side by setting base/rel price log!("Issue bob sell request"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -1921,7 +1921,7 @@ fn test_own_orders_should_not_be_removed_from_orderbook() { thread::sleep(Duration::from_secs(6)); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": "RICK", @@ -1940,7 +1940,7 @@ fn test_own_orders_should_not_be_removed_from_orderbook() { #[cfg(not(target_arch = "wasm32"))] fn check_priv_key(mm: &MarketMakerIt, coin: &str, expected_priv_key: &str) { - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "show_priv_key", "coin": coin @@ -1958,7 +1958,7 @@ fn test_show_priv_key() { let coins = json!([rick_conf(), morty_conf(), eth_dev_conf()]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -1992,13 +1992,13 @@ fn test_show_priv_key() { #[test] #[cfg(not(target_arch = "wasm32"))] fn test_electrum_and_enable_response() { - let coins = json! ([ + let coins = json!([ {"coin":"RICK","asset":"RICK","rpcport":8923,"txversion":4,"overwintered":1,"protocol":{"type":"UTXO"},"mature_confirmations":101}, eth_dev_conf(), ]); let mm = MarketMakerIt::start( - json! 
({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -2041,7 +2041,7 @@ fn test_electrum_and_enable_response() { assert_eq!(rick_response["mature_confirmations"], Json::from(101)); // should change requires notarization at runtime - let requires_nota_rick = block_on(mm.rpc(&json! ({ + let requires_nota_rick = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "set_requires_notarization", "coin": "RICK", @@ -2062,7 +2062,7 @@ fn test_electrum_and_enable_response() { Json::from(false) ); - let enable_eth = block_on(mm.rpc(&json! ({ + let enable_eth = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "enable", "coin": "ETH", @@ -2100,7 +2100,7 @@ fn set_price_with_cancel_previous_should_broadcast_cancelled_message() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -2124,7 +2124,7 @@ fn set_price_with_cancel_previous_should_broadcast_cancelled_message() { block_on(enable_coins_rick_morty_electrum(&mm_bob)) ); - let set_price_json = json! ({ + let set_price_json = json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -2137,7 +2137,7 @@ fn set_price_with_cancel_previous_should_broadcast_cancelled_message() { assert!(rc.0.is_success(), "!setprice: {}", rc.1); let mm_alice = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -2163,7 +2163,7 @@ fn set_price_with_cancel_previous_should_broadcast_cancelled_message() { ); log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -2187,7 +2187,7 @@ fn set_price_with_cancel_previous_should_broadcast_cancelled_message() { // Bob orderbook must show 1 order log!("Get RICK/MORTY orderbook on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": "RICK", @@ -2203,7 +2203,7 @@ fn set_price_with_cancel_previous_should_broadcast_cancelled_message() { // Alice orderbook must have 1 order log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -2225,7 +2225,7 @@ fn test_batch_requests() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -2302,7 +2302,7 @@ fn test_metrics_method() { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -2352,7 +2352,7 @@ fn test_electrum_tx_history() { ]); let mut mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -2410,7 +2410,7 @@ fn spin_n_nodes(seednodes: &[&str], coins: &Json, n: usize) -> Vec<(MarketMakerI let mut mm_nodes = Vec::with_capacity(n); for i in 0..n { let mut mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("ALICE_TRADE_IP") .ok(), @@ -2444,7 +2444,7 @@ fn test_convert_utxo_address() { ]); let mm = MarketMakerIt::start( - json! 
({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -2464,7 +2464,7 @@ fn test_convert_utxo_address() { let _electrum = block_on(enable_electrum(&mm, "BCH", false, T_BCH_ELECTRUMS)); // test standard to cashaddress - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "BCH", @@ -2488,7 +2488,7 @@ fn test_convert_utxo_address() { assert_eq!(actual, expected); // test cashaddress to standard - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "BCH", @@ -2512,7 +2512,7 @@ fn test_convert_utxo_address() { assert_eq!(actual, expected); // test standard to standard - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "BCH", @@ -2536,7 +2536,7 @@ fn test_convert_utxo_address() { assert_eq!(actual, expected); // test invalid address - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "BCH", @@ -2566,7 +2566,7 @@ fn test_convert_segwit_address() { let _electrum = block_on(enable_electrum(&mm, "tBTC-Segwit", false, TBTC_ELECTRUMS)); // test standard to segwit - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "tBTC-Segwit", @@ -2590,7 +2590,7 @@ fn test_convert_segwit_address() { assert_eq!(actual, expected); // test segwit to standard - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "tBTC-Segwit", @@ -2614,7 +2614,7 @@ fn test_convert_segwit_address() { assert_eq!(actual, expected); // test invalid tBTC standard address - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "tBTC-Segwit", @@ -2630,7 +2630,7 @@ fn test_convert_segwit_address() { assert!(rc.1.contains("invalid address prefix")); // test invalid tBTC segwit address - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "tBTC-Segwit", @@ -2653,7 +2653,7 @@ fn test_convert_eth_address() { // start mm and immediately place the order let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -2675,7 +2675,7 @@ fn test_convert_eth_address() { block_on(enable_native(&mm, "ETH", ETH_SEPOLIA_NODES, None)); // test single-case to mixed-case - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "ETH", @@ -2699,7 +2699,7 @@ fn test_convert_eth_address() { assert_eq!(actual, expected); // test mixed-case to mixed-case (expect error) - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "ETH", @@ -2723,7 +2723,7 @@ fn test_convert_eth_address() { assert_eq!(actual, expected); // test invalid address - let rc = block_on(mm.rpc(&json! 
({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "ETH", @@ -2948,13 +2948,13 @@ fn test_get_staking_infos_qtum() { #[cfg(not(target_arch = "wasm32"))] fn test_convert_qrc20_address() { let passphrase = "cV463HpebE2djP9ugJry5wZ9st5cc6AbkHXGryZVPXMH1XJK8cVU"; - let coins = json! ([ + let coins = json!([ {"coin":"QRC20","required_confirmations":0,"pubtype": 120,"p2shtype": 50,"wiftype": 128,"txfee": 0,"mm2": 1,"mature_confirmations":2000, "protocol":{"type":"QRC20","protocol_data":{"platform":"QTUM","contract_address":"0xd362e096e873eb7907e205fadc6175c6fec7bc44"}}}, ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8999, "dht": "on", // Enable DHT without delay. @@ -2986,7 +2986,7 @@ fn test_convert_qrc20_address() { )); // test wallet to contract - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "QRC20", @@ -3010,7 +3010,7 @@ fn test_convert_qrc20_address() { assert_eq!(actual, expected); // test contract to wallet - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "QRC20", @@ -3034,7 +3034,7 @@ fn test_convert_qrc20_address() { assert_eq!(actual, expected); // test wallet to wallet - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "QRC20", @@ -3058,7 +3058,7 @@ fn test_convert_qrc20_address() { assert_eq!(actual, expected); // test invalid address (invalid prefixes) - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "QRC20", @@ -3075,7 +3075,7 @@ fn test_convert_qrc20_address() { assert!(rc.1.contains("invalid address prefix")); // test invalid address - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "convertaddress", "coin": "QRC20", @@ -3147,7 +3147,7 @@ fn test_validateaddress() { // test valid ETH address - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "validateaddress", "coin": "ETH", @@ -3171,7 +3171,7 @@ fn test_validateaddress() { // test invalid RICK address (legacy address format activated) - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "validateaddress", "coin": "RICK", @@ -3194,7 +3194,7 @@ fn test_validateaddress() { // test invalid RICK address (invalid prefixes) - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "validateaddress", "coin": "RICK", @@ -3218,7 +3218,7 @@ fn test_validateaddress() { // test invalid ETH address - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "validateaddress", "coin": "ETH", @@ -3257,7 +3257,7 @@ fn test_validateaddress_segwit() { log!("enable_coins (alice): {:?}", electrum); // test valid Segwit address - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "validateaddress", "coin": "tBTC-Segwit", @@ -3280,7 +3280,7 @@ fn test_validateaddress_segwit() { assert_eq!(actual, expected); // test invalid tBTC Segwit address (invalid hrp) - let rc = block_on(mm_alice.rpc(&json! 
({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "validateaddress", "coin": "tBTC-Segwit", @@ -3309,13 +3309,13 @@ fn test_validateaddress_segwit() { #[cfg(not(target_arch = "wasm32"))] fn qrc20_activate_electrum() { let passphrase = "cV463HpebE2djP9ugJry5wZ9st5cc6AbkHXGryZVPXMH1XJK8cVU"; - let coins = json! ([ + let coins = json!([ {"coin":"QRC20","required_confirmations":0,"pubtype": 120,"p2shtype": 50,"wiftype": 128,"txfee": 0,"mm2": 1,"mature_confirmations":2000, "protocol":{"type":"QRC20","protocol_data":{"platform":"QTUM","contract_address":"0xd362e096e873eb7907e205fadc6175c6fec7bc44"}}}, ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8999, "dht": "on", // Enable DHT without delay. @@ -3363,7 +3363,7 @@ fn test_qrc20_withdraw() { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8999, "dht": "on", // Enable DHT without delay. @@ -3404,7 +3404,7 @@ fn test_qrc20_withdraw() { let amount = 10; - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "withdraw", "coin": "QRC20", @@ -3424,7 +3424,7 @@ fn test_qrc20_withdraw() { log!("{}", withdraw_json); assert!(withdraw_json["tx_hex"].as_str().unwrap().contains("5403a02526012844a9059cbb0000000000000000000000000240b898276ad2cc0d2fe6f527e8e31104e7fde3000000000000000000000000000000000000000000000000000000003b9aca0014d362e096e873eb7907e205fadc6175c6fec7bc44c2")); - let send_tx = block_on(mm.rpc(&json! ({ + let send_tx = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "send_raw_transaction", "coin": "QRC20", @@ -3445,7 +3445,7 @@ fn test_qrc20_withdraw_error() { ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8999, "dht": "on", // Enable DHT without delay. @@ -3479,7 +3479,7 @@ fn test_qrc20_withdraw_error() { assert_eq!(balance, "10"); // try to transfer more than balance - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "withdraw", "coin": "QRC20", @@ -3498,7 +3498,7 @@ fn test_qrc20_withdraw_error() { .contains("Not enough QRC20 to withdraw: available 10, required at least 11")); // try to transfer with zero QTUM balance - let withdraw = block_on(mm.rpc(&json! ({ + let withdraw = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "withdraw", "coin": "QRC20", @@ -3526,12 +3526,12 @@ fn test_qrc20_withdraw_error() { #[test] #[cfg(not(target_arch = "wasm32"))] fn test_get_raw_transaction() { - let coins = json! ([ + let coins = json!([ {"coin":"RICK","asset":"RICK","required_confirmations":0,"txversion":4,"overwintered":1,"protocol":{"type":"UTXO"}}, {"coin":"ETH","name":"ethereum","chain_id":1,"protocol":{"type":"ETH"}}, ]); let mm = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "passphrase": "boob", @@ -3548,7 +3548,7 @@ fn test_get_raw_transaction() { log!("log path: {}", mm.log_path.display()); // RICK let _electrum = block_on(enable_electrum(&mm, "RICK", false, DOC_ELECTRUM_ADDRS)); - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3566,7 +3566,7 @@ fn test_get_raw_transaction() { assert_eq!(res.result.tx_hex, expected_hex); // ETH - let eth = block_on(mm.rpc(&json! 
({ + let eth = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "enable", "coin": "ETH", @@ -3577,7 +3577,7 @@ fn test_get_raw_transaction() { }))) .unwrap(); assert_eq!(eth.0, StatusCode::OK, "'enable' failed: {}", eth.1); - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3594,7 +3594,7 @@ fn test_get_raw_transaction() { json::from_str(&raw.1).expect("Expected 'RpcSuccessResponse'"); let expected_hex = "f9012a19851a0de19041830249f09424abe4c71fc658c91313b6552cd40cd808b3ea8080b8c49b415b2a167d3413b0116abb8e99f4c2d4cd39a64df9bc9950006c4ae884527258247dc100000000000000000000000000000000000000000000000006f05b59d3b200000000000000000000000000000d8775f648430679a709e98d2b0cb6250d2887ef0000000000000000000000000112679fc5e6338a52098ab095bee1e9a15bc630ba9528127bcff524677236f3739cef013311f42000000000000000000000000000000000000000000000000000000000000000000000000000000000619626fa25a0b143893550c8d0164278f94d5fa51ba71e3dfefa112e6f53a575bcb494633a07a00cc60b65e44ae5053257b91c1023b637a38d87ffc32c822591275a6283cd6ec5"; assert_eq!(res.result.tx_hex, expected_hex); - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3614,7 +3614,7 @@ fn test_get_raw_transaction() { // invalid coin let zombi_coin = String::from("ZOMBI"); - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3636,7 +3636,7 @@ fn test_get_raw_transaction() { assert_eq!(error.error_data, Some(expected_error)); // empty hash - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3655,7 +3655,7 @@ fn test_get_raw_transaction() { let error: RpcErrorResponse = json::from_str(&raw.1).unwrap(); assert_eq!(error.error_type, "InvalidHashError"); // invalid hash - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3675,7 +3675,7 @@ fn test_get_raw_transaction() { assert_eq!(error.error_type, "InvalidHashError"); // valid hash but hash not exist - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3694,7 +3694,7 @@ fn test_get_raw_transaction() { let error: RpcErrorResponse = json::from_str(&raw.1).unwrap(); assert_eq!(error.error_type, "HashNotExist"); // valid hash but hash not exist without 0x prefix - let raw = block_on(mm.rpc(&json! ({ + let raw = block_on(mm.rpc(&json!({ "mmrpc": "2.0", "userpass": mm.userpass, "method": "get_raw_transaction", @@ -3917,13 +3917,13 @@ fn test_tx_history_tbtc_non_segwit() { fn test_update_maker_order() { let bob_passphrase = get_passphrase(&".env.seed", "BOB_PASSPHRASE").unwrap(); - let coins = json! ([ + let coins = json!([ {"coin":"RICK","asset":"RICK","required_confirmations":0,"txversion":4,"overwintered":1,"protocol":{"type":"UTXO"}}, {"coin":"MORTY","asset":"MORTY","required_confirmations":0,"txversion":4,"overwintered":1,"protocol":{"type":"UTXO"}} ]); let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8999, "dht": "on", // Enable DHT without delay. 
@@ -3945,7 +3945,7 @@ fn test_update_maker_order() { log!("{:?}", block_on(enable_coins_rick_morty_electrum(&mm_bob))); log!("Issue bob sell request"); - let setprice = block_on(mm_bob.rpc(&json! ({ + let setprice = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -3964,7 +3964,7 @@ fn test_update_maker_order() { let uuid: Uuid = json::from_value(setprice_json["result"]["uuid"].clone()).unwrap(); log!("Issue bob update maker order request"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -3982,7 +3982,7 @@ fn test_update_maker_order() { assert_eq!(update_maker_order_json["result"]["min_base_vol"], Json::from("1")); log!("Issue another bob update maker order request"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4000,7 +4000,7 @@ fn test_update_maker_order() { assert_eq!(update_maker_order_json["result"]["min_base_vol"], Json::from("1")); log!("Get bob balance"); - let my_balance = block_on(mm_bob.rpc(&json! ({ + let my_balance = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "my_balance", "coin": "RICK", @@ -4031,7 +4031,7 @@ fn test_update_maker_order() { let max_volume = balance - trade_fee; log!("Issue another bob update maker order request"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4057,13 +4057,13 @@ fn test_update_maker_order() { fn test_update_maker_order_fail() { let bob_passphrase = get_passphrase(&".env.seed", "BOB_PASSPHRASE").unwrap(); - let coins = json! ([ + let coins = json!([ {"coin":"RICK","asset":"RICK","required_confirmations":0,"txversion":4,"overwintered":1,"protocol":{"type":"UTXO"}}, {"coin":"MORTY","asset":"MORTY","required_confirmations":0,"txversion":4,"overwintered":1,"protocol":{"type":"UTXO"}} ]); let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8999, "dht": "on", // Enable DHT without delay. @@ -4085,7 +4085,7 @@ fn test_update_maker_order_fail() { log!("{:?}", block_on(enable_coins_rick_morty_electrum(&mm_bob))); log!("Issue bob sell request"); - let setprice = block_on(mm_bob.rpc(&json! ({ + let setprice = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -4103,7 +4103,7 @@ fn test_update_maker_order_fail() { let uuid: Uuid = json::from_value(setprice_json["result"]["uuid"].clone()).unwrap(); log!("Issue bob update maker order request that should fail because price is too low"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4117,7 +4117,7 @@ fn test_update_maker_order_fail() { ); log!("Issue bob update maker order request that should fail because New Volume is Less than Zero"); - let update_maker_order = block_on(mm_bob.rpc(&json! 
({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4131,7 +4131,7 @@ fn test_update_maker_order_fail() { ); log!("Issue bob update maker order request that should fail because Min base vol is too low"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4146,7 +4146,7 @@ fn test_update_maker_order_fail() { ); log!("Issue bob update maker order request that should fail because Max base vol is below Min base vol"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4161,7 +4161,7 @@ fn test_update_maker_order_fail() { ); log!("Issue bob update maker order request that should fail because Max base vol is too low"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4176,7 +4176,7 @@ fn test_update_maker_order_fail() { ); log!("Issue bob update maker order request that should fail because Max rel vol is too low"); - let update_maker_order = block_on(mm_bob.rpc(&json! ({ + let update_maker_order = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "update_maker_order", "uuid": uuid, @@ -4266,7 +4266,7 @@ fn test_trade_fee_returns_numbers_in_various_formats() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -4286,7 +4286,7 @@ fn test_trade_fee_returns_numbers_in_various_formats() { log!("Bob log path: {}", mm_bob.log_path.display()); block_on(enable_coins_rick_morty_electrum(&mm_bob)); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "get_trade_fee", "coin": "RICK", @@ -4308,7 +4308,7 @@ fn test_orderbook_is_mine_orders() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -4332,7 +4332,7 @@ fn test_orderbook_is_mine_orders() { block_on(enable_coins_rick_morty_electrum(&mm_bob)) ); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -4345,7 +4345,7 @@ fn test_orderbook_is_mine_orders() { let _bob_setprice: Json = json::from_str(&rc.1).unwrap(); let mm_alice = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "dht": "on", // Enable DHT without delay. @@ -4372,7 +4372,7 @@ fn test_orderbook_is_mine_orders() { // Bob orderbook must show 1 mine order log!("Get RICK/MORTY orderbook on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": "RICK", @@ -4390,7 +4390,7 @@ fn test_orderbook_is_mine_orders() { // Alice orderbook must show 1 not-mine order log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! 
({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -4407,7 +4407,7 @@ fn test_orderbook_is_mine_orders() { assert!(!is_mine); // make another order by Alice - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "setprice", "base": "RICK", @@ -4424,7 +4424,7 @@ fn test_orderbook_is_mine_orders() { // Bob orderbook must show 1 mine and 1 non-mine orders. // Request orderbook with reverse base and rel coins to check bids instead of asks log!("Get RICK/MORTY orderbook on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": "MORTY", @@ -4444,7 +4444,7 @@ fn test_orderbook_is_mine_orders() { // Alice orderbook must show 1 mine and 1 non-mine orders log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "orderbook", "base": "RICK", @@ -4486,7 +4486,7 @@ fn test_mm2_db_migration() { // if there is an issue with migration the start will fail MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 8999, "dht": "on", // Enable DHT without delay. @@ -4647,7 +4647,7 @@ fn test_get_orderbook_with_same_orderbook_ticker() { let (_dump_log, _dump_dashboard) = mm.mm_dump(); log!("Log path: {}", mm.log_path.display()); - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "orderbook", "base": "RICK", @@ -4660,7 +4660,7 @@ fn test_get_orderbook_with_same_orderbook_ticker() { rc.1 ); - let rc = block_on(mm.rpc(&json! ({ + let rc = block_on(mm.rpc(&json!({ "userpass": mm.userpass, "method": "orderbook", "base": "RICK", @@ -4700,7 +4700,7 @@ fn test_conf_settings_in_orderbook() { ); log!("Issue set_price request for RICK/MORTY on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -4712,7 +4712,7 @@ fn test_conf_settings_in_orderbook() { assert!(rc.0.is_success(), "!setprice: {}", rc.1); log!("Issue set_price request for MORTY/RICK on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "MORTY", @@ -4745,7 +4745,7 @@ fn test_conf_settings_in_orderbook() { ); log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -4823,7 +4823,7 @@ fn alice_can_see_confs_in_orderbook_after_sync() { ); log!("Issue sell request on Bob side"); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "method": "setprice", "base": "RICK", @@ -4834,7 +4834,7 @@ fn alice_can_see_confs_in_orderbook_after_sync() { .unwrap(); assert!(rc.0.is_success(), "!setprice: {}", rc.1); - let rc = block_on(mm_bob.rpc(&json! ({ + let rc = block_on(mm_bob.rpc(&json!({ "userpass": mm_bob.userpass, "mmrpc": "2.0", "method": "get_public_key", @@ -4874,7 +4874,7 @@ fn alice_can_see_confs_in_orderbook_after_sync() { // setting the price will trigger Alice's subscription to the orderbook topic // but won't request the actual orderbook - let rc = block_on(mm_alice.rpc(&json! 
({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "setprice", "base": "RICK", @@ -4894,7 +4894,7 @@ fn alice_can_see_confs_in_orderbook_after_sync() { .unwrap(); log!("Get RICK/MORTY orderbook on Alice side"); - let rc = block_on(mm_alice.rpc(&json! ({ + let rc = block_on(mm_alice.rpc(&json!({ "userpass": mm_alice.userpass, "method": "orderbook", "base": "RICK", @@ -4950,7 +4950,7 @@ fn test_sign_verify_message_utxo() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -5017,7 +5017,7 @@ fn test_sign_verify_message_utxo_segwit() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -5094,7 +5094,7 @@ fn test_sign_verify_message_eth() { // start bob and immediately place the order let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -5164,7 +5164,7 @@ fn test_no_login() { ]; for (base, rel, price, volume, min_volume) in orders.iter() { - let rc = block_on(seednode.rpc(&json! ({ + let rc = block_on(seednode.rpc(&json!({ "userpass": seednode.userpass, "method": "setprice", "base": base, @@ -5178,7 +5178,7 @@ fn test_no_login() { assert!(rc.0.is_success(), "!setprice: {}", rc.1); } - let orderbook = block_on(no_login_node.rpc(&json! ({ + let orderbook = block_on(no_login_node.rpc(&json!({ "userpass": no_login_node.userpass, "method": "orderbook", "base": "RICK", @@ -5190,7 +5190,7 @@ fn test_no_login() { assert_eq!(orderbook.asks.len(), 3); assert_eq!(orderbook.bids.len(), 2); - let orderbook_v2 = block_on(no_login_node.rpc(&json! ({ + let orderbook_v2 = block_on(no_login_node.rpc(&json!({ "userpass": no_login_node.userpass, "mmrpc": "2.0", "method": "orderbook", @@ -5206,7 +5206,7 @@ fn test_no_login() { assert_eq!(orderbook_v2.asks.len(), 3); assert_eq!(orderbook_v2.bids.len(), 2); - let best_orders = block_on(no_login_node.rpc(&json! ({ + let best_orders = block_on(no_login_node.rpc(&json!({ "userpass": no_login_node.userpass, "method": "best_orders", "coin": "RICK", @@ -5221,7 +5221,7 @@ fn test_no_login() { let expected_price: BigDecimal = "0.8".parse().unwrap(); assert_eq!(expected_price, best_morty_orders[0].price); - let best_orders_v2 = block_on(no_login_node.rpc(&json! ({ + let best_orders_v2 = block_on(no_login_node.rpc(&json!({ "userpass": no_login_node.userpass, "mmrpc": "2.0", "method": "best_orders", @@ -5242,7 +5242,7 @@ fn test_no_login() { let expected_price: BigDecimal = "0.7".parse().unwrap(); assert_eq!(expected_price, best_morty_orders[0].price.decimal); - let orderbook_depth = block_on(no_login_node.rpc(&json! ({ + let orderbook_depth = block_on(no_login_node.rpc(&json!({ "userpass": no_login_node.userpass, "method": "orderbook_depth", "pairs":[["RICK","MORTY"]] @@ -5258,7 +5258,7 @@ fn test_no_login() { assert_eq!(orderbook_depth[0].depth.asks, 3); assert_eq!(orderbook_depth[0].depth.bids, 2); - let version = block_on(no_login_node.rpc(&json! ({ + let version = block_on(no_login_node.rpc(&json!({ "userpass": no_login_node.userpass, "method": "version", }))) @@ -5543,7 +5543,7 @@ fn test_enable_btc_with_sync_starting_header() { let coins = json!([btc_with_sync_starting_header()]); let mm_bob = MarketMakerIt::start( - json! 
({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), @@ -5573,7 +5573,7 @@ fn test_btc_block_header_sync() { let coins = json!([btc_with_spv_conf()]); let mm_bob = MarketMakerIt::start( - json! ({ + json!({ "gui": "nogui", "netid": 9998, "myipaddr": env::var ("BOB_TRADE_IP") .ok(), diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 1fa45039a4..4a108cef9e 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -2247,10 +2247,11 @@ pub async fn my_swap_status(mm: &MarketMakerIt, uuid: &str) -> Result wait_until { panic!("Timed out waiting for swap {} status", uuid); } From d32f8f6dfc8655432c1e700ff3102a14a36509bf Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 10 Jun 2024 16:30:34 +0100 Subject: [PATCH 143/186] fix clippy --- mm2src/coins/lp_coins.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index defe998a6a..f9d9f0e92a 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -5111,14 +5111,17 @@ pub fn address_by_coin_conf_and_pubkey_str( } #[cfg(target_arch = "wasm32")] -async fn load_history_from_file_impl(coin: &T, ctx: &MmArc) -> TxHistoryResult> +async fn load_history_from_file_impl( + coin: &T, + ctx: &MmArc, + db_id: Option<&str>, +) -> TxHistoryResult> where T: MmCoin + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); let my_address = coin.my_address()?; - let db_id = coin.account_db_id().await; let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; From 966607dec5f700ab5a7e0ebba7ba99fc4376de30 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 11 Jun 2024 01:00:57 +0100 Subject: [PATCH 144/186] fix bugs and improve code --- mm2src/coins/eth.rs | 30 ++--- mm2src/coins/lightning/ln_utils.rs | 17 +-- mm2src/coins/lp_coins.rs | 2 +- .../src/lightning_activation.rs | 4 +- mm2src/mm2_main/src/lp_swap.rs | 120 ++++++++++++------ mm2src/mm2_main/src/lp_swap/maker_swap.rs | 25 ++-- mm2src/mm2_main/src/lp_swap/saved_swap.rs | 4 - mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 41 ++++-- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 27 ++-- 9 files changed, 156 insertions(+), 114 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index b458db4164..ca434bc54d 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -134,37 +134,27 @@ cfg_native! 
{ use std::path::PathBuf; } +mod eip1559_gas_fee; mod eth_balance_events; +pub mod eth_hd_wallet; mod eth_rpc; #[cfg(test)] mod eth_tests; #[cfg(target_arch = "wasm32")] mod eth_wasm_tests; +mod eth_withdraw; #[cfg(any(test, target_arch = "wasm32"))] mod for_tests; pub(crate) mod nft_swap_v2; -mod web3_transport; - -use web3_transport::{http_transport::HttpTransportNode, Web3Transport}; - -pub mod eth_hd_wallet; - -use eth_hd_wallet::EthHDWallet; - -#[path = "eth/v2_activation.rs"] pub mod v2_activation; - -use v2_activation::{build_address_and_priv_key_policy, EthActivationV2Error}; - -mod eth_withdraw; - -use eth_withdraw::{EthWithdraw, InitEthWithdraw, StandardEthWithdraw}; - mod nonce; - -use nonce::ParityNonce; - -mod eip1559_gas_fee; +#[path = "eth/v2_activation.rs"] pub mod v2_activation; +mod web3_transport; pub(crate) use eip1559_gas_fee::FeePerGasEstimated; use eip1559_gas_fee::{BlocknativeGasApiCaller, FeePerGasSimpleEstimator, GasApiConfig, GasApiProvider, InfuraGasApiCaller}; +use eth_hd_wallet::EthHDWallet; +use eth_withdraw::{EthWithdraw, InitEthWithdraw, StandardEthWithdraw}; +use nonce::ParityNonce; +use v2_activation::{build_address_and_priv_key_policy, EthActivationV2Error}; +use web3_transport::{http_transport::HttpTransportNode, Web3Transport}; /// https://github.com/artemii235/etomic-swap/blob/master/contracts/EtomicSwap.sol /// Dev chain (195.201.137.5:8565) contract address: 0x83965C539899cC0F918552e5A26915de40ee8852 diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 02e59112ee..9408724d3a 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -3,7 +3,6 @@ use crate::lightning::ln_db::LightningDB; use crate::lightning::ln_platform::{get_best_header, ln_best_block_update_loop, update_best_block}; use crate::lightning::ln_sql::SqliteLightningDB; use crate::lightning::ln_storage::{LightningStorage, NodesAddressesMap}; -use crate::lp_coinfind_any; use crate::utxo::rpc_clients::BestBlock as RpcBestBlock; use bitcoin::hash_types::BlockHash; use bitcoin_hashes::{sha256d, Hash}; @@ -69,18 +68,10 @@ pub async fn init_persister( Ok(persister) } -pub async fn init_db(ctx: &MmArc, ticker: String) -> EnableLightningResult { - let ticker_to_mm_coin = lp_coinfind_any(ctx, &ticker) - .await - .map_to_mm(|err| { - EnableLightningError::InvalidRequest(format!("{ticker} not found or is not activated yet! err=({err})")) - })? 
- .ok_or_else(|| EnableLightningError::InvalidRequest(format!("{ticker} not found or is not activated yet!")))?; - let shared = ctx - .sqlite_conn_opt(ticker_to_mm_coin.inner.account_db_id().await.as_deref()) - .or_mm_err(|| { - EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) - })?; +pub async fn init_db(ctx: &MmArc, ticker: String, db_id: Option<&str>) -> EnableLightningResult { + let shared = ctx.sqlite_conn_opt(db_id).or_mm_err(|| { + EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) + })?; let db = SqliteLightningDB::new(ticker, shared)?; diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index f9d9f0e92a..5498f75b93 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -5124,7 +5124,7 @@ where let my_address = coin.my_address()?; let coins_ctx = CoinsContext::from_ctx(&ctx).unwrap(); - let db = coins_ctx.tx_history_db(db_id.as_deref()).await?; + let db = coins_ctx.tx_history_db(db_id).await?; let err = match load_tx_history(&db, &ticker, &my_address).await { Ok(history) => return Ok(history), Err(e) => e, diff --git a/mm2src/coins_activation/src/lightning_activation.rs b/mm2src/coins_activation/src/lightning_activation.rs index 1d2f9ec232..5992f5ff29 100644 --- a/mm2src/coins_activation/src/lightning_activation.rs +++ b/mm2src/coins_activation/src/lightning_activation.rs @@ -15,7 +15,7 @@ use coins::lightning::ln_utils::{get_open_channels_nodes_addresses, init_channel use coins::lightning::{InvoicePayer, LightningCoin}; use coins::utxo::utxo_standard::UtxoStandardCoin; use coins::utxo::UtxoCommonOps; -use coins::{BalanceError, CoinBalance, CoinProtocol, MarketCoinOps, MmCoinEnum, RegisterCoinError}; +use coins::{BalanceError, CoinBalance, CoinProtocol, MarketCoinOps, MmCoin, MmCoinEnum, RegisterCoinError}; use common::executor::{SpawnFuture, Timer}; use crypto::hw_rpc_task::{HwRpcTaskAwaitingStatus, HwRpcTaskUserAction}; use derive_more::Display; @@ -371,7 +371,7 @@ async fn start_lightning( )); // Initialize DB - let db = init_db(ctx, conf.ticker.clone()).await?; + let db = init_db(ctx, conf.ticker.clone(), platform_coin.account_db_id().await.as_deref()).await?; // Initialize the ChannelManager task_handle.update_in_progress_status(LightningInProgressStatus::InitializingChannelManager)?; diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index a147204a39..f8091b4e92 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -62,8 +62,8 @@ use crate::mm2::lp_network::{broadcast_p2p_msg, Libp2pPeerId, P2PProcessError, P use crate::mm2::lp_swap::maker_swap_v2::{MakerSwapStateMachine, MakerSwapStorage}; use crate::mm2::lp_swap::taker_swap_v2::{TakerSwapStateMachine, TakerSwapStorage}; use bitcrypto::{dhash160, sha256}; -use coins::{find_unique_account_ids_active, lp_coinfind, lp_coinfind_or_err, CoinFindError, DexFee, MmCoin, - MmCoinEnum, TradeFee, TransactionEnum}; +use coins::{find_unique_account_ids_active, find_unique_account_ids_any, lp_coinfind, lp_coinfind_or_err, + CoinFindError, DexFee, MmCoin, MmCoinEnum, TradeFee, TransactionEnum}; use common::log::{debug, warn}; use common::now_sec; use common::time_cache::DuplicateCache; @@ -1117,36 +1117,70 @@ impl From for MySwapStatusResponse { /// Returns the status of swap performed on `my` node pub async fn my_swap_status(ctx: MmArc, req: Json) -> Result>, String> { let uuid: Uuid = try_s!(json::from_value(req["params"]["uuid"].clone())); - let db_id: 
Option = try_s!(json::from_value(req["params"]["db_id"].clone())); - let swap_type = try_s!(get_swap_type(&ctx, &uuid, db_id.as_deref()).await); - - match swap_type { - Some(LEGACY_SWAP_TYPE) => { - let status = match SavedSwap::load_my_swap_from_db(&ctx, db_id.as_deref(), uuid).await { - Ok(Some(status)) => status, - Ok(None) => return Err("swap data is not found".to_owned()), - Err(e) => return ERR!("{}", e), - }; + let db_ids = find_unique_account_ids_any(&ctx).await?; + let mut last_error = None; + + for db_id in db_ids.iter() { + match get_swap_type(&ctx, &uuid, Some(db_id)).await { + Ok(Some(LEGACY_SWAP_TYPE)) => { + let status = match SavedSwap::load_my_swap_from_db(&ctx, Some(db_id), uuid).await { + Ok(Some(status)) => status, + Ok(None) => { + last_error = Some("swap data is not found".to_owned()); + continue; + }, + Err(e) => { + last_error = Some(format!("{}", e)); + continue; + }, + }; - let res_js = json!({ "result": MySwapStatusResponse::from(status) }); - let res = try_s!(json::to_vec(&res_js)); - Ok(try_s!(Response::builder().body(res))) - }, - Some(MAKER_SWAP_V2_TYPE) => { - let swap_data = try_s!(get_maker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); - let res_js = json!({ "result": swap_data }); - let res = try_s!(json::to_vec(&res_js)); - Ok(try_s!(Response::builder().body(res))) - }, - Some(TAKER_SWAP_V2_TYPE) => { - let swap_data = try_s!(get_taker_swap_data_for_rpc(&ctx, &uuid, db_id.as_deref()).await); - let res_js = json!({ "result": swap_data }); - let res = try_s!(json::to_vec(&res_js)); - Ok(try_s!(Response::builder().body(res))) - }, - Some(unsupported_type) => ERR!("Got unsupported swap type from DB: {}", unsupported_type), - None => ERR!("No swap with uuid {}", uuid), + let res_js = json!({ "result": MySwapStatusResponse::from(status) }); + let res = try_s!(json::to_vec(&res_js)); + return Ok(try_s!(Response::builder().body(res))); + }, + Ok(Some(MAKER_SWAP_V2_TYPE)) => { + let swap_data = match get_maker_swap_data_for_rpc(&ctx, &uuid, Some(db_id)).await { + Ok(data) => data, + Err(e) => { + last_error = Some(format!("{}", e)); + continue; + }, + }; + + let res_js = json!({ "result": swap_data }); + let res = try_s!(json::to_vec(&res_js)); + return Ok(try_s!(Response::builder().body(res))); + }, + Ok(Some(TAKER_SWAP_V2_TYPE)) => { + let swap_data = match get_taker_swap_data_for_rpc(&ctx, &uuid, Some(db_id)).await { + Ok(data) => data, + Err(e) => { + last_error = Some(format!("{}", e)); + continue; + }, + }; + + let res_js = json!({ "result": swap_data }); + let res = try_s!(json::to_vec(&res_js)); + return Ok(try_s!(Response::builder().body(res))); + }, + Ok(Some(unsupported_type)) => { + last_error = Some(format!("Got unsupported swap type from DB: {}", unsupported_type)); + continue; + }, + Ok(None) => { + last_error = Some(format!("No swap type found for uuid {}", uuid)); + continue; + }, + Err(e) => { + last_error = Some(format!("{}", e)); + continue; + }, + } } + + Err(last_error.unwrap_or_else(|| format!("swap_status not found for {uuid:?}"))) } #[cfg(target_arch = "wasm32")] @@ -1339,7 +1373,7 @@ pub async fn latest_swaps_for_pair( /// Returns the data of recent swaps of `my` node. 
pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: MyRecentSwapsReq = try_s!(json::from_value(req)); - let db_ids = try_s!(find_unique_account_ids_active(&ctx).await); + let db_ids = try_s!(find_unique_account_ids_any(&ctx).await); let mut res_js = vec![]; for db_id in db_ids { @@ -1388,17 +1422,19 @@ pub async fn my_recent_swaps_rpc(ctx: MmArc, req: Json) -> Result, uuid: &Uuid) stats_maker_swap_dir(ctx, db_id).join(format!("{}.json", uuid)) } -async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSavedEvent) -> Result<(), String> { - let db_id = swap.db_id(); - let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { +async fn save_my_maker_swap_event( + ctx: &MmArc, + swap: &MakerSwap, + event: MakerSavedEvent, + db_id: Option<&str>, +) -> Result<(), String> { + let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id, swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Maker(MakerSavedSwap { uuid: swap.uuid, @@ -114,7 +118,7 @@ async fn save_my_maker_swap_event(ctx: &MmArc, swap: &MakerSwap, event: MakerSav maker_swap.fetch_and_set_usd_prices().await; } let new_swap = SavedSwap::Maker(maker_swap); - try_s!(new_swap.save_to_db(ctx, db_id.as_deref()).await); + try_s!(new_swap.save_to_db(ctx, db_id).await); Ok(()) } else { ERR!("Expected SavedSwap::Maker, got {:?}", swap) @@ -281,9 +285,6 @@ impl MakerSwap { } } - #[inline] - fn db_id(&self) -> Option { self.r().data.db_id.clone() } - fn apply_event(&self, event: MakerSwapEvent) { match event { MakerSwapEvent::Started(data) => { @@ -2034,10 +2035,10 @@ impl RunMakerSwapInput { } } - async fn db_id(&self) -> Option { + fn maker_coin(&self) -> &MmCoinEnum { match self { - RunMakerSwapInput::StartNew(swap) => swap.db_id(), - RunMakerSwapInput::KickStart { maker_coin, .. } => maker_coin.account_db_id().await, + RunMakerSwapInput::StartNew(swap) => &swap.maker_coin, + RunMakerSwapInput::KickStart { maker_coin, .. } => maker_coin, } } } @@ -2047,8 +2048,8 @@ impl RunMakerSwapInput { /// because it's usually means that swap is in invalid state which is possible only if there's developer error. /// Every produced event is saved to local DB. Swap status is broadcasted to P2P network after completion. 
pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { + let db_id = swap.maker_coin().account_db_id().await; let uuid = swap.uuid().to_owned(); - let db_id = swap.db_id().await.to_owned(); let mut attempts = 0; let swap_lock = loop { match SwapLock::lock(&ctx, uuid, 40., db_id.as_deref()).await { @@ -2144,7 +2145,7 @@ pub async fn run_maker_swap(swap: RunMakerSwapInput, ctx: MmArc) { .dispatch_async(ctx.clone(), LpEvents::MakerSwapStatusChanged(event_to_send)) .await; drop(dispatcher); - save_my_maker_swap_event(&ctx, &running_swap, to_save) + save_my_maker_swap_event(&ctx, &running_swap, to_save, db_id.as_deref()) .await .expect("!save_my_maker_swap_event"); if event.should_ban_taker() { diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index 35e8544d24..eed96a0fb4 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -321,7 +321,6 @@ mod wasm_impl { } pub async fn migrate_swaps_data(ctx: &MmArc, db_id: Option<&str>) -> MmResult<(), SavedSwapError> { - info!("migrate_swaps_data: {db_id:?}"); let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; @@ -413,7 +412,6 @@ mod wasm_impl { db_id: Option<&str>, uuid: Uuid, ) -> SavedSwapResult> { - info!("load_my_swap_from_db: {db_id:?}"); let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; @@ -428,7 +426,6 @@ mod wasm_impl { } async fn load_all_my_swaps_from_db(ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult> { - info!("load_all_my_swaps_from_db: {db_id:?}"); let swaps_ctx = SwapsContext::from_ctx(ctx).map_to_mm(SavedSwapError::InternalError)?; let db = swaps_ctx.swap_db(db_id).await?; let transaction = db.transaction().await?; @@ -444,7 +441,6 @@ mod wasm_impl { } async fn save_to_db(&self, ctx: &MmArc, db_id: Option<&str>) -> SavedSwapResult<()> { - info!("save_to_db: {db_id:?}"); let saved_swap = json::to_value(self).map_to_mm(|e| SavedSwapError::ErrorSerializing(e.to_string()))?; let saved_swap_item = SavedSwapTable { uuid: *self.uuid(), diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index 2d9db0dee8..b9eb5f36d9 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -5,7 +5,7 @@ use super::taker_swap::TakerSavedSwap; use super::taker_swap_v2::TakerSwapEvent; use super::{active_swaps, MySwapsFilter, SavedSwap, SavedSwapError, SavedSwapIo, LEGACY_SWAP_TYPE, MAKER_SWAP_V2_TYPE, TAKER_SWAP_V2_TYPE}; -use coins::find_unique_account_ids_active; +use coins::{find_unique_account_ids_active, find_unique_account_ids_any}; use common::log::{error, warn}; use common::{calc_total_pages, HttpStatusCode, PagingOptions}; use derive_more::Display; @@ -338,7 +338,6 @@ async fn get_swap_data_by_uuid_and_type( #[derive(Deserialize)] pub(crate) struct MySwapStatusRequest { uuid: Uuid, - db_id: Option, } #[derive(Display, Serialize, SerializeErrorType)] @@ -383,12 +382,38 @@ pub(crate) async fn my_swap_status_rpc( ctx: MmArc, req: MySwapStatusRequest, ) -> MmResult { - let swap_type = get_swap_type(&ctx, &req.uuid, req.db_id.as_deref()) - .await? - .or_mm_err(|| MySwapStatusError::NoSwapWithUuid(req.uuid))?; - get_swap_data_by_uuid_and_type(&ctx, None, req.uuid, swap_type) - .await? 
- .or_mm_err(|| MySwapStatusError::NoSwapWithUuid(req.uuid)) + let db_ids = find_unique_account_ids_any(&ctx) + .await + .map_to_mm(|_| MySwapStatusError::DbError("No db_ids found".to_string()))?; + + let mut last_error = MySwapStatusError::NoSwapWithUuid(req.uuid); + + for db_id in db_ids.iter() { + match get_swap_type(&ctx, &req.uuid, Some(db_id)).await { + Ok(Some(swap_type)) => match get_swap_data_by_uuid_and_type(&ctx, Some(db_id), req.uuid, swap_type).await { + Ok(Some(swap_data)) => { + return Ok(swap_data); + }, + Ok(None) => { + last_error = MySwapStatusError::NoSwapWithUuid(req.uuid); + }, + Err(e) => { + last_error = MySwapStatusError::DbError(format!( + "Error loading swap data for uuid {} in db_id: {}: {}", + req.uuid, db_id, e + )); + }, + }, + Ok(None) => { + last_error = MySwapStatusError::NoSwapWithUuid(req.uuid); + }, + Err(e) => { + last_error = MySwapStatusError::DbError(format!("Error getting swap type for db_id: {}: {}", db_id, e)); + }, + } + } + + Err(last_error.into()) } #[derive(Deserialize)] diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 39e966a700..d1da7a686d 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -107,9 +107,15 @@ pub fn stats_taker_swap_file_path(ctx: &MmArc, db_id: Option<&str>, uuid: &Uuid) stats_taker_swap_dir(ctx, db_id).join(format!("{}.json", uuid)) } -async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSavedEvent) -> Result<(), String> { - let db_id = swap.db_id(); - let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id.as_deref(), swap.uuid).await { +async fn save_my_taker_swap_event( + ctx: &MmArc, + swap: &TakerSwap, + event: TakerSavedEvent, + db_id: Option<&str>, +) -> Result<(), String> { + info!("event: {event:?}"); + + let swap = match SavedSwap::load_my_swap_from_db(ctx, db_id, swap.uuid).await { Ok(Some(swap)) => swap, Ok(None) => SavedSwap::Taker(TakerSavedSwap { uuid: swap.uuid, @@ -145,7 +151,7 @@ async fn save_my_taker_swap_event(ctx: &MmArc, swap: &TakerSwap, event: TakerSav taker_swap.fetch_and_set_usd_prices().await; } let new_swap = SavedSwap::Taker(taker_swap); - try_s!(new_swap.save_to_db(ctx, db_id.as_deref()).await); + try_s!(new_swap.save_to_db(ctx, db_id).await); Ok(()) } else { ERR!("Expected SavedSwap::Taker, got {:?}", swap) @@ -381,10 +387,10 @@ impl RunTakerSwapInput { } } - async fn db_id(&self) -> Option { + fn taker_coin(&self) -> &MmCoinEnum { match self { - RunTakerSwapInput::StartNew(swap) => swap.db_id(), - RunTakerSwapInput::KickStart { taker_coin, .. } => taker_coin.account_db_id().await, + RunTakerSwapInput::StartNew(swap) => &swap.taker_coin, + RunTakerSwapInput::KickStart { taker_coin, .. } => taker_coin, } } } @@ -394,8 +400,8 @@ impl RunTakerSwapInput { /// because it's usually means that swap is in invalid state which is possible only if there's developer error /// Every produced event is saved to local DB. Swap status is broadcast to P2P network after completion. 
pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { + let db_id = swap.taker_coin().account_db_id().await; let uuid = swap.uuid().to_owned(); - let db_id = swap.db_id().await.to_owned(); let mut attempts = 0; let swap_lock = loop { match SwapLock::lock(&ctx, uuid, 40., db_id.as_deref()).await { @@ -479,7 +485,7 @@ pub async fn run_taker_swap(swap: RunTakerSwapInput, ctx: MmArc) { event: event.clone(), }; - save_my_taker_swap_event(&ctx, &running_swap, to_save) + save_my_taker_swap_event(&ctx, &running_swap, to_save, db_id.as_deref()) .await .expect("!save_my_taker_swap_event"); if event.should_ban_maker() { @@ -805,9 +811,6 @@ impl TakerSwap { #[inline] fn wait_refund_until(&self) -> u64 { self.r().data.taker_payment_lock + 3700 } - #[inline] - fn db_id(&self) -> Option { self.r().data.db_id.clone() } - pub(crate) fn apply_event(&self, event: TakerSwapEvent) { match event { TakerSwapEvent::Started(data) => { From 19930d3d65a66a7cfad45778f56fed8eff1d9ed6 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 12 Jun 2024 01:15:39 +0100 Subject: [PATCH 145/186] rename account_shared_db_id to shared_db_id --- mm2src/coins/lp_coins.rs | 2 +- mm2src/common/custom_futures/repeatable.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 5498f75b93..11ecdda9ff 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3209,7 +3209,7 @@ pub trait MmCoin: async fn account_db_id(&self) -> Option { None } - fn account_shared_db_id(&self) -> Option { None } + async fn shared_db_id(&self) -> Option { None } /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/common/custom_futures/repeatable.rs b/mm2src/common/custom_futures/repeatable.rs index fbc1a8c9e8..13642fcebe 100644 --- a/mm2src/common/custom_futures/repeatable.rs +++ b/mm2src/common/custom_futures/repeatable.rs @@ -443,7 +443,7 @@ mod tests { actual, Err(RepeatError::AttemptsExceed { attempts: ACTUAL_ATTEMPTS, - error: "Not ready" + error: "Not ready", }) ); @@ -616,7 +616,7 @@ mod tests { actual, Err(RepeatError::AttemptsExceed { attempts: 1, - error: "Not ready" + error: "Not ready", }) ); } @@ -646,7 +646,7 @@ mod tests { actual, Err(RepeatError::AttemptsExceed { attempts: 1, - error: "Not ready" + error: "Not ready", }) ); From 527efbcb178701500b37728cf2b22bc5df6191da Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 12 Jun 2024 02:04:24 +0100 Subject: [PATCH 146/186] use HDACcount shared_db_id for tx_history --- mm2src/coins/lp_coins.rs | 4 ++++ mm2src/coins/my_tx_history_v2.rs | 2 +- mm2src/coins/utxo/bch.rs | 2 ++ mm2src/coins/utxo/qtum.rs | 2 ++ mm2src/coins/utxo/utxo_common.rs | 13 +++++++++++++ mm2src/coins/utxo/utxo_common_tests.rs | 6 +++--- mm2src/coins/utxo/utxo_standard.rs | 2 ++ .../src/standalone_coin/init_standalone_coin.rs | 3 ++- 8 files changed, 29 insertions(+), 5 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 11ecdda9ff..0f2b3920d2 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3211,6 +3211,10 @@ pub trait MmCoin: async fn shared_db_id(&self) -> Option { None } + /// In normal wallet mode, this function returns the regular `db_id`, which is the RMD160 hash of the public key. + /// In HD wallet mode, it returns `hd_wallet_rmd160`, which is the RMD160 hash unique to the HD wallet/device. 
+ async fn tx_history_db_id(&self) -> Option { None } + /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] fn tx_history_path(&self, ctx: &MmArc, db_id: Option<&str>) -> PathBuf { diff --git a/mm2src/coins/my_tx_history_v2.rs b/mm2src/coins/my_tx_history_v2.rs index 039c4d2a41..b574f19e75 100644 --- a/mm2src/coins/my_tx_history_v2.rs +++ b/mm2src/coins/my_tx_history_v2.rs @@ -402,7 +402,7 @@ pub(crate) async fn my_tx_history_v2_impl( where Coin: CoinWithTxHistoryV2 + MmCoin, { - let tx_history_storage = TxHistoryStorageBuilder::new(&ctx, coin.account_db_id().await).build()?; + let tx_history_storage = TxHistoryStorageBuilder::new(&ctx, coin.tx_history_db_id().await).build()?; let wallet_id = coin.history_wallet_id(); let is_storage_init = tx_history_storage.is_initialized_for(&wallet_id).await?; diff --git a/mm2src/coins/utxo/bch.rs b/mm2src/coins/utxo/bch.rs index 1e3985521f..fa43785d0b 100644 --- a/mm2src/coins/utxo/bch.rs +++ b/mm2src/coins/utxo/bch.rs @@ -1360,6 +1360,8 @@ impl MmCoin for BchCoin { } async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } + + async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self.clone()).await } } #[async_trait] diff --git a/mm2src/coins/utxo/qtum.rs b/mm2src/coins/utxo/qtum.rs index c75c9b5e5d..beb3f33632 100644 --- a/mm2src/coins/utxo/qtum.rs +++ b/mm2src/coins/utxo/qtum.rs @@ -981,6 +981,8 @@ impl MmCoin for QtumCoin { fn on_token_deactivated(&self, _ticker: &str) {} async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } + + async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self.clone()).await } } #[async_trait] diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index 42d0857ea1..bb3b6012db 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -5154,6 +5154,19 @@ where None } +/// This function will return regular db_id(pubkey rmd160) for normal wallet mode and hd_wallet_rmd160 for hd mode. 
+pub async fn tx_history_db_id(coin: Coin) -> Option +where + Coin: CoinWithDerivationMethod + HDWalletCoinOps + HDCoinWithdrawOps + UtxoCommonOps, +{ + if let DerivationMethod::HDWallet(hd_wallet) = coin.derivation_method() { + // we can use hd_wallet_rmd160 as our db_id since it's unique to a device and not a single address + Some(hex::encode(hd_wallet.inner.hd_wallet_rmd160.as_slice())) + } else { + account_db_id(coin).await + } +} + #[test] fn test_increase_by_percent() { assert_eq!(increase_by_percent(4300, 1.), 4343); diff --git a/mm2src/coins/utxo/utxo_common_tests.rs b/mm2src/coins/utxo/utxo_common_tests.rs index 7b06b88d15..6e43fac7ed 100644 --- a/mm2src/coins/utxo/utxo_common_tests.rs +++ b/mm2src/coins/utxo/utxo_common_tests.rs @@ -288,10 +288,10 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { #[cfg(not(target_arch = "wasm32"))] { let dbs = ctx.sqlite_conn_pool.as_option().unwrap(); - dbs.add_test_db("b591d089ee36906f96172761c78556f2f75953aa".to_string()); + dbs.add_test_db(coin.tx_history_db_id().await.unwrap()); } let current_balances = coin.my_addresses_balances().await.unwrap(); - let storage = TxHistoryStorageBuilder::new(&ctx, Some("b591d089ee36906f96172761c78556f2f75953aa".to_string())) + let storage = TxHistoryStorageBuilder::new(&ctx, coin.tx_history_db_id().await) .build() .unwrap(); spawn(utxo_history_loop( @@ -318,7 +318,7 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { _ => unimplemented!(), } - let storage = TxHistoryStorageBuilder::new(&ctx, coin.account_db_id().await) + let storage = TxHistoryStorageBuilder::new(&ctx, coin.tx_history_db_id().await) .build() .unwrap(); spawn(utxo_history_loop( diff --git a/mm2src/coins/utxo/utxo_standard.rs b/mm2src/coins/utxo/utxo_standard.rs index 2052e6586c..e73e355d00 100644 --- a/mm2src/coins/utxo/utxo_standard.rs +++ b/mm2src/coins/utxo/utxo_standard.rs @@ -1008,6 +1008,8 @@ impl MmCoin for UtxoStandardCoin { fn on_token_deactivated(&self, _ticker: &str) {} async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } + + async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self.clone()).await } } #[async_trait] diff --git a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs index 9a265680cb..1ac6b83d06 100644 --- a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs +++ b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs @@ -221,9 +221,10 @@ where task_handle.clone(), ) .await?; + coin.start_history_background_fetching( self.ctx.metrics.clone(), - TxHistoryStorageBuilder::new(&self.ctx, coin_clone.into().account_db_id().await).build()?, + TxHistoryStorageBuilder::new(&self.ctx, coin_clone.into().tx_history_db_id().await).build()?, current_balances, ); } From bb41f41a3ee878bfaa3d1ce4dfb17af0d898d152 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 12 Jun 2024 03:31:26 +0100 Subject: [PATCH 147/186] cleanups and improve eth impl --- mm2src/coins/eth.rs | 16 ++++++++++++---- mm2src/coins/utxo/bch.rs | 4 ++-- mm2src/coins/utxo/qtum.rs | 4 ++-- mm2src/coins/utxo/utxo_common.rs | 21 ++++++++++++--------- mm2src/coins/utxo/utxo_standard.rs | 4 ++-- 5 files changed, 30 insertions(+), 19 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index ca434bc54d..9808e2fe15 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -5729,16 +5729,24 @@ impl MmCoin for EthCoin { } 
async fn account_db_id(&self) -> Option { - let derivation_method = &self.deref().derivation_method; - if let DerivationMethod::HDWallet(hd_wallet) = derivation_method.as_ref() { + if let Some(hd_wallet) = self.derivation_method().hd_wallet() { if let Some(addr) = hd_wallet.get_enabled_address().await { - let addr = dhash160(addr.address.as_bytes()); - return Some(hex::encode(addr)); + return Some(hex::encode(dhash160(addr.pubkey().as_bytes()))); } } None } + + async fn tx_history_db_id(&self) -> Option { + if let Some(hd_wallet) = self.derivation_method().hd_wallet() { + // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address + return Some(hex::encode(hd_wallet.hd_wallet_rmd160.as_slice())); + }; + + // Fallback to the account db_id for non-HD wallets + self.account_db_id().await + } } pub trait TryToAddress { diff --git a/mm2src/coins/utxo/bch.rs b/mm2src/coins/utxo/bch.rs index fa43785d0b..acf18a5cde 100644 --- a/mm2src/coins/utxo/bch.rs +++ b/mm2src/coins/utxo/bch.rs @@ -1359,9 +1359,9 @@ impl MmCoin for BchCoin { }; } - async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } + async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self.clone()).await } + async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self).await } } #[async_trait] diff --git a/mm2src/coins/utxo/qtum.rs b/mm2src/coins/utxo/qtum.rs index beb3f33632..e34f9f522f 100644 --- a/mm2src/coins/utxo/qtum.rs +++ b/mm2src/coins/utxo/qtum.rs @@ -980,9 +980,9 @@ impl MmCoin for QtumCoin { fn on_token_deactivated(&self, _ticker: &str) {} - async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } + async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self.clone()).await } + async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self).await } } #[async_trait] diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index bb3b6012db..ac831e5aa8 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -5141,11 +5141,12 @@ where Ok(transaction) } -pub async fn account_db_id(coin: Coin) -> Option +pub async fn account_db_id(coin: &Coin) -> Option where Coin: CoinWithDerivationMethod + HDWalletCoinOps + HDCoinWithdrawOps + UtxoCommonOps, { - if let DerivationMethod::HDWallet(hd_wallet) = coin.derivation_method() { + if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { + // we can use hd_wallet_rmd160 as our shared_db_id since it's unique to a device if let Some(addr) = hd_wallet.get_enabled_address().await { return Some(hex::encode(addr.pubkey().address_hash().as_slice())); } @@ -5154,17 +5155,19 @@ where None } -/// This function will return regular db_id(pubkey rmd160) for normal wallet mode and hd_wallet_rmd160 for hd mode. -pub async fn tx_history_db_id(coin: Coin) -> Option +/// In normal wallet mode, this function returns the regular `db_id`, which is the RMD160 hash of the public key. +/// In HD wallet mode, it returns `hd_wallet_rmd160`, which is the RMD160 hash unique to the HD wallet/device. 
+pub async fn tx_history_db_id(coin: &Coin) -> Option where Coin: CoinWithDerivationMethod + HDWalletCoinOps + HDCoinWithdrawOps + UtxoCommonOps, { - if let DerivationMethod::HDWallet(hd_wallet) = coin.derivation_method() { - // we can use hd_wallet_rmd160 as our db_id since it's unique to a device and not a single address - Some(hex::encode(hd_wallet.inner.hd_wallet_rmd160.as_slice())) - } else { - account_db_id(coin).await + if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { + // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address + return Some(hex::encode(hd_wallet.inner.hd_wallet_rmd160.as_slice())); } + + // Fallback to the account db_id for non-HD wallets + account_db_id(coin).await } #[test] diff --git a/mm2src/coins/utxo/utxo_standard.rs b/mm2src/coins/utxo/utxo_standard.rs index e73e355d00..c113fed05b 100644 --- a/mm2src/coins/utxo/utxo_standard.rs +++ b/mm2src/coins/utxo/utxo_standard.rs @@ -1007,9 +1007,9 @@ impl MmCoin for UtxoStandardCoin { fn on_token_deactivated(&self, _ticker: &str) {} - async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self.clone()).await } + async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self.clone()).await } + async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self).await } } #[async_trait] From 916ce6a33dfa2320ed2809674d217c08fceed6c3 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 12 Jun 2024 12:19:28 +0100 Subject: [PATCH 148/186] fix clear nft todo --- mm2src/coins/hd_wallet/storage/mod.rs | 2 - mm2src/coins/nft.rs | 61 +++++++++++++++++---------- 2 files changed, 38 insertions(+), 25 deletions(-) diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index 5fa96b9a42..77597c248f 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -220,7 +220,6 @@ impl Default for HDWalletCoinStorage { } impl HDWalletCoinStorage { - // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init(ctx: &MmArc, coin: String) -> HDWalletStorageResult { let crypto_ctx = CryptoCtx::from_ctx(ctx)?; let hd_wallet_rmd160 = crypto_ctx @@ -234,7 +233,6 @@ impl HDWalletCoinStorage { }) } - // TODO: Since hd_wallet_rmd160 is unique for a device, do we use it as db_id too? or we can just use mm2 shared_db_id and use hd_wallet_rmd160 for primary key as it's currently done pub async fn init_with_rmd160( ctx: &MmArc, coin: String, diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 9a92eedc91..a7dfd84dc4 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -1580,32 +1580,47 @@ pub(crate) fn get_domain_from_url(url: Option<&str>) -> Option { /// Clears NFT data from the database for specified chains. 
pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearNftDbError> { - // TODO: db_id - let db_id: Option = None; - if req.clear_all { - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db(None).await?; - storage.clear_all_nft_data().await?; - storage.clear_all_history_data().await?; - return Ok(()); - } + let db_ids = find_unique_nft_account_ids(&ctx, req.chains.clone()) + .await + .map_to_mm(ClearNftDbError::Internal)?; + + for (idx, (db_id, chains)) in db_ids.iter().enumerate() { + if req.clear_all { + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db(None).await?; + storage.clear_all_nft_data().await?; + storage.clear_all_history_data().await?; + if idx == db_ids.len() - 1 { + return Ok(()); + } + } - if req.chains.is_empty() { - return MmError::err(ClearNftDbError::InvalidRequest( - "Nothing to clear was specified".to_string(), - )); - } + let filtered_chains = chains + .iter() + .filter_map(|c| req.chains.contains(c).then_some(c)) + .collect::>(); + if filtered_chains.is_empty() { + if idx == db_ids.len() - 1 { + return MmError::err(ClearNftDbError::InvalidRequest( + "Nothing to clear was specified".to_string(), + )); + } else { + continue; + } + } - let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; - let storage = nft_ctx.lock_db(db_id.as_deref()).await?; - let mut errors = Vec::new(); - for chain in req.chains.iter() { - if let Err(e) = clear_data_for_chain(&storage, chain).await { - errors.push(e); + let nft_ctx = NftCtx::from_ctx(&ctx).map_to_mm(ClearNftDbError::Internal)?; + let storage = nft_ctx.lock_db(Some(db_id)).await?; + let mut errors = Vec::new(); + for chain in filtered_chains { + if let Err(e) = clear_data_for_chain(&storage, chain).await { + errors.push(e); + } + } + + if !errors.is_empty() && idx == db_ids.len() - 1 { + return MmError::err(ClearNftDbError::DbError(format!("{:?}", errors))); } - } - if !errors.is_empty() { - return MmError::err(ClearNftDbError::DbError(format!("{:?}", errors))); } Ok(()) From 0f7c075a90f9267a77c47216248aaac353f800d9 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 13 Jun 2024 20:02:46 +0100 Subject: [PATCH 149/186] add docs and very minor improvs --- mm2src/coins/lp_coins.rs | 5 ++ mm2src/mm2_core/src/mm_ctx.rs | 3 +- mm2src/mm2_main/src/database/my_swaps.rs | 4 +- mm2src/mm2_main/src/database/stats_nodes.rs | 70 +++++++++------------ mm2src/mm2_main/src/lp_native_dex.rs | 2 +- mm2src/mm2_main/src/lp_stats.rs | 6 +- 6 files changed, 41 insertions(+), 49 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 0f2b3920d2..c2a0459074 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -3207,8 +3207,13 @@ pub trait MmCoin: /// Loop collecting coin transaction history and saving it to local DB fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; + /// Retrieves a unique identifier for the account based on the coin's derivation method. + /// E.g, If the coin is derived from an HD wallet, it uses the public key hash of the enabled address as the database ID. + /// If the coin is not derived from an HD wallet, it returns `None`. async fn account_db_id(&self) -> Option { None } + // Retrieves a unique identifier for the account that is shared across different contexts, + /// such as different derivation methods (HD wallet vs. 
non-HD wallet) async fn shared_db_id(&self) -> Option { None } /// In normal wallet mode, this function returns the regular `db_id`, which is the RMD160 hash of the public key. diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index f58831a813..ea4112cd2f 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -60,7 +60,6 @@ const EXPORT_METRICS_INTERVAL: f64 = 5. * 60.; /// Only the pointers (`MmArc`, `MmWeak`) can be moved around. /// /// Threads only have the non-`mut` access to `MmCtx`, allowing us to directly share certain fields. -#[allow(clippy::type_complexity)] pub struct MmCtx { /// MM command-line configuration. pub conf: Json, @@ -120,7 +119,7 @@ pub struct MmCtx { /// The RPC sender forwarding requests to writing part of underlying stream. #[cfg(target_arch = "wasm32")] pub wasm_rpc: Constructible, - /// Deprecated, please create `shared_async_sqlite_conn` for new implementations and call db `KOMODEFI-shared.db` for shared_db. + /// Deprecated, please use `async_sqlite_conn_pool` for new implementations. #[cfg(not(target_arch = "wasm32"))] pub sqlite_conn_pool: Constructible, /// asynchronous handle for rusqlite connection. diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index 7575160625..c58eefe7a3 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -216,9 +216,7 @@ pub fn select_uuids_by_my_swaps_filter( if total_count == 0 { return Ok(MyRecentSwapsUuids { pubkey: db_id, - uuids_and_types: vec![], - skipped: 0, - total_count: 0, + ..MyRecentSwapsUuids::default() }); } diff --git a/mm2src/mm2_main/src/database/stats_nodes.rs b/mm2src/mm2_main/src/database/stats_nodes.rs index e5dfdf200c..da5de84693 100644 --- a/mm2src/mm2_main/src/database/stats_nodes.rs +++ b/mm2src/mm2_main/src/database/stats_nodes.rs @@ -1,7 +1,7 @@ /// This module contains code to work with nodes table for stats collection in MM2 SQLite DB use crate::mm2::lp_stats::{NodeInfo, NodeVersionStat}; use common::log::debug; -use db_common::sqlite::rusqlite::{params_from_iter, Connection, Error as SqlError, Result as SqlResult}; +use db_common::sqlite::rusqlite::{params_from_iter, Error as SqlError, Result as SqlResult}; use mm2_core::mm_ctx::MmArc; use std::collections::hash_map::HashMap; @@ -30,64 +30,54 @@ const SELECT_PEERS_NAMES: &str = "SELECT peer_id, name FROM nodes"; const INSERT_STAT: &str = "INSERT INTO stats_nodes (name, version, timestamp, error) VALUES (?1, ?2, ?3, ?4)"; -pub fn insert_node_info(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str>) -> SqlResult<()> { - debug!("Inserting info about node {} to the SQLite database", node_info.name); - let params = vec![ - node_info.name.clone(), - node_info.address.clone(), - node_info.peer_id.clone(), - ]; +pub fn insert_node_info(ctx: &MmArc, node_info: NodeInfo, db_id: Option<&str>) -> SqlResult<()> { ctx.run_sql_query(db_id, move |conn| { + debug!("Inserting info about node {} to the SQLite database", node_info.name); + let params = vec![node_info.name.clone(), node_info.address.clone(), node_info.peer_id]; conn.execute(INSERT_NODE, params_from_iter(params.iter())).map(|_| ()) }) } pub fn delete_node_info(ctx: &MmArc, name: String, db_id: Option<&str>) -> SqlResult<()> { - debug!("Deleting info about node {} from the SQLite database", name); - let params = vec![name]; ctx.run_sql_query(db_id, move |conn| { + debug!("Deleting info about node {} from the SQLite database", name); + let params = 
vec![name]; conn.execute(DELETE_NODE, params_from_iter(params.iter())).map(|_| ()) }) } -fn select_peers_addresses_impl(conn: &Connection) -> SqlResult, SqlError> { - let mut stmt = conn.prepare(SELECT_PEERS_ADDRESSES)?; - let peers_addresses = stmt - .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? - .collect::>>()?; - Ok(peers_addresses) -} - pub fn select_peers_addresses(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { - ctx.run_sql_query(db_id, move |conn| select_peers_addresses_impl(&conn)) -} - -fn select_peers_names_impl(conn: &Connection) -> SqlResult, SqlError> { - let mut stmt = conn.prepare(SELECT_PEERS_NAMES)?; - let peers_names = stmt - .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? - .collect::>>()?; - - Ok(peers_names) + ctx.run_sql_query(db_id, move |conn| { + let mut stmt = conn.prepare(SELECT_PEERS_ADDRESSES)?; + let peers_addresses = stmt + .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? + .collect::>>()?; + Ok(peers_addresses) + }) } pub fn select_peers_names(ctx: &MmArc, db_id: Option<&str>) -> SqlResult, SqlError> { - ctx.run_sql_query(db_id, move |conn| select_peers_names_impl(&conn)) + ctx.run_sql_query(db_id, move |conn| { + let mut stmt = conn.prepare(SELECT_PEERS_NAMES)?; + let peers_names = stmt + .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? + .collect::>>()?; + Ok(peers_names) + }) } pub fn insert_node_version_stat(ctx: &MmArc, node_version_stat: NodeVersionStat, db_id: Option<&str>) -> SqlResult<()> { - debug!( - "Inserting new version stat for node {} to the SQLite database", - node_version_stat.name - ); - let params = vec![ - node_version_stat.name, - node_version_stat.version.unwrap_or_default(), - node_version_stat.timestamp.to_string(), - node_version_stat.error.unwrap_or_default(), - ]; - ctx.run_sql_query(db_id, move |conn| { + debug!( + "Inserting new version stat for node {} to the SQLite database", + node_version_stat.name + ); + let params = vec![ + node_version_stat.name, + node_version_stat.version.unwrap_or_default(), + node_version_stat.timestamp.to_string(), + node_version_stat.error.unwrap_or_default(), + ]; conn.execute(INSERT_STAT, params_from_iter(params.iter())).map(|_| ()) }) } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index d53dcf8417..802b700f68 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -592,7 +592,7 @@ async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { .coins_needed_for_kick_start .lock() .map_to_mm(|poison| MmInitError::Internal(poison.to_string()))?; - // extend existing coins list needed for kickstart so even there's a new pubkey activation, the coins will added + // extend existing coins list needed for kickstart when there's a new pubkey activation lock.extend(coins_needed_for_kick_start); Ok(()) } diff --git a/mm2src/mm2_main/src/lp_stats.rs b/mm2src/mm2_main/src/lp_stats.rs index 90e692de34..734f9d700d 100644 --- a/mm2src/mm2_main/src/lp_stats.rs +++ b/mm2src/mm2_main/src/lp_stats.rs @@ -96,7 +96,7 @@ pub struct NodeVersionStat { fn insert_node_info_to_db(_ctx: &MmArc, _node_info: &NodeInfo) -> Result<(), String> { Ok(()) } #[cfg(not(target_arch = "wasm32"))] -fn insert_node_info_to_db(ctx: &MmArc, node_info: &NodeInfo, db_id: Option<&str>) -> Result<(), String> { +fn insert_node_info_to_db(ctx: &MmArc, node_info: NodeInfo, db_id: Option<&str>) -> Result<(), String> { crate::mm2::database::stats_nodes::insert_node_info(ctx, node_info, db_id).map_err(|e| e.to_string()) 
} @@ -168,9 +168,9 @@ pub async fn add_node_to_version_stat(ctx: MmArc, req: Json) -> NodeVersionResul .map(|db_id| { let ctx = ctx.clone(); let node_info_with_ipv4_addr = node_info_with_ipv4_addr.clone(); - let db_id = db_id.clone(); + let db_id = db_id.to_owned(); async_blocking(move || { - insert_node_info_to_db(&ctx, &node_info_with_ipv4_addr, Some(&db_id)) + insert_node_info_to_db(&ctx, node_info_with_ipv4_addr, Some(&db_id)) .map_to_mm(NodeVersionError::DatabaseError) }) }) From bf12b01a9542d634e23d055b9edde9f713058bfe Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 18 Jun 2024 18:32:53 +0100 Subject: [PATCH 150/186] minor changes --- mm2src/coins/eth.rs | 22 +++------------------- mm2src/coins/eth/v2_activation.rs | 20 ++++++++++++++++++++ mm2src/coins/utxo/utxo_common.rs | 19 +++++++++---------- 3 files changed, 32 insertions(+), 29 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index 9808e2fe15..fed583173a 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -153,7 +153,7 @@ use eip1559_gas_fee::{BlocknativeGasApiCaller, FeePerGasSimpleEstimator, GasApiC use eth_hd_wallet::EthHDWallet; use eth_withdraw::{EthWithdraw, InitEthWithdraw, StandardEthWithdraw}; use nonce::ParityNonce; -use v2_activation::{build_address_and_priv_key_policy, EthActivationV2Error}; +use v2_activation::{build_address_and_priv_key_policy, eth_account_db_id, eth_shared_db_id, EthActivationV2Error}; use web3_transport::{http_transport::HttpTransportNode, Web3Transport}; /// https://github.com/artemii235/etomic-swap/blob/master/contracts/EtomicSwap.sol @@ -5728,25 +5728,9 @@ impl MmCoin for EthCoin { }; } - async fn account_db_id(&self) -> Option { - if let Some(hd_wallet) = self.derivation_method().hd_wallet() { - if let Some(addr) = hd_wallet.get_enabled_address().await { - return Some(hex::encode(dhash160(addr.pubkey().as_bytes()))); - } - } - - None - } + async fn account_db_id(&self) -> Option { eth_account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { - if let Some(hd_wallet) = self.derivation_method().hd_wallet() { - // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address - return Some(hex::encode(hd_wallet.hd_wallet_rmd160.as_slice())); - }; - - // Fallback to the account db_id for non-HD wallets - self.account_db_id().await - } + async fn tx_history_db_id(&self) -> Option { eth_shared_db_id(self).await.or(self.account_db_id().await) } } pub trait TryToAddress { diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 4c431a792c..0e6f63bd3c 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -960,3 +960,23 @@ async fn run_db_migraiton_for_new_eth_pubkey(ctx: &MmArc, pubkey: &KeyPair) -> M Ok(()) } + +pub(super) async fn eth_shared_db_id(coin: &EthCoin) -> Option { + // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address + coin.derivation_method().hd_wallet().and_then(|hd| { + shared_db_id_from_seed(&hex::encode(hd.hd_wallet_rmd160.as_slice())) + .ok() + .map(|id| hex::encode(id.as_slice())) + }) +} + +pub(super) async fn eth_account_db_id(coin: &EthCoin) -> Option { + if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { + return hd_wallet + .get_enabled_address() + .await + .map(|addr| hex::encode(dhash160(addr.pubkey().as_bytes()))); + } + + None +} diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index ac831e5aa8..91c1c00598 100644 --- 
a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -5147,9 +5147,10 @@ where { if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { // we can use hd_wallet_rmd160 as our shared_db_id since it's unique to a device - if let Some(addr) = hd_wallet.get_enabled_address().await { - return Some(hex::encode(addr.pubkey().address_hash().as_slice())); - } + return hd_wallet + .get_enabled_address() + .await + .map(|addr| hex::encode(addr.pubkey().address_hash().as_slice())); } None @@ -5161,13 +5162,11 @@ pub async fn tx_history_db_id(coin: &Coin) -> Option where Coin: CoinWithDerivationMethod + HDWalletCoinOps + HDCoinWithdrawOps + UtxoCommonOps, { - if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { - // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address - return Some(hex::encode(hd_wallet.inner.hd_wallet_rmd160.as_slice())); - } - - // Fallback to the account db_id for non-HD wallets - account_db_id(coin).await + // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address + coin.derivation_method() + .hd_wallet() + .map(|hd| hex::encode(hd.inner.hd_wallet_rmd160.as_slice())) + .or(account_db_id(coin).await) // Fallback to the account db_id for non-HD wallets } #[test] From 54714ebd7ca7d4ae5dafa1898d87fd3ba1915fce Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 19 Jun 2024 11:54:02 +0100 Subject: [PATCH 151/186] use device rmd160 for hd_wallet storage --- mm2src/coins/eth/v2_activation.rs | 4 +--- mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs | 3 +-- mm2src/crypto/src/shared_db_id.rs | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 0e6f63bd3c..389ac015ca 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -6,7 +6,6 @@ use crate::nft::nft_errors::{GetNftInfoError, ParseChainTypeError}; use crate::nft::nft_structs::Chain; #[cfg(target_arch = "wasm32")] use crate::EthMetamaskPolicy; use common::executor::AbortedError; -#[cfg(not(target_arch = "wasm32"))] use crypto::shared_db_id::shared_db_id_from_seed; use crypto::{trezor::TrezorError, Bip32Error, CryptoCtxError, HwError}; use enum_derives::EnumFromTrait; @@ -545,7 +544,6 @@ pub async fn eth_coin_from_conf_and_request_v2( ) => { let auth_address = key_pair.address(); let auth_address_str = display_eth_address(&auth_address); - // TODO: send migration request. build_web3_instances(ctx, ticker.to_string(), auth_address_str, key_pair, req.nodes.clone()).await? 
}, (EthRpcMode::Default, EthPrivKeyPolicy::Trezor) => { @@ -674,7 +672,7 @@ pub(crate) async fn build_address_and_priv_key_policy( .map_to_mm(|e| EthActivationV2Error::InternalError(e.to_string()))?; let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); - let hd_wallet_rmd160 = *ctx.rmd160(); + let hd_wallet_rmd160 = dhash160(activated_key.public().as_bytes()); let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) .await .mm_err(EthActivationV2Error::from)?; diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 55ae91e71e..2cf03f6174 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -219,8 +219,7 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { }; let address_format = self.address_format()?; - let hd_wallet_rmd160 = *self.ctx().rmd160(); - // TODO shared_db_id + let hd_wallet_rmd160 = activated_key_pair.public().address_hash(); let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(self.ctx(), self.ticker().to_owned(), hd_wallet_rmd160).await?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, path_to_coin) diff --git a/mm2src/crypto/src/shared_db_id.rs b/mm2src/crypto/src/shared_db_id.rs index 8c78baaaf3..611594e26f 100644 --- a/mm2src/crypto/src/shared_db_id.rs +++ b/mm2src/crypto/src/shared_db_id.rs @@ -11,7 +11,7 @@ const SHARED_DB_MAGIC_SALT: &str = "uVa*6pcnpc9ki+VBX.6_L."; pub type SharedDbId = H160; -#[derive(Display, EnumFromStringify)] +#[derive(Debug, Display, EnumFromStringify)] pub enum SharedDbIdError { #[display(fmt = "Passphrase cannot be an empty string")] EmptyPassphrase, From 7699f55a8b8e59c593e98fd3671660031ae5200a Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 20 Jun 2024 16:50:20 +0100 Subject: [PATCH 152/186] implement account_db_id for pirate and fix othe review notes --- mm2src/coins/z_coin.rs | 11 +- mm2src/mm2_gui_storage/src/lib.rs | 1 - mm2src/mm2_gui_storage/src/rpc_commands.rs | 760 +++++++++--------- mm2src/mm2_main/src/rpc.rs | 7 +- .../mm2_main/src/rpc/dispatcher/dispatcher.rs | 10 +- .../tests/mm2_tests/gui_storage_tests.rs | 270 +++++++ .../tests/mm2_tests/mm2_tests_inner.rs | 271 ------- mm2src/mm2_test_helpers/src/for_tests.rs | 2 +- 8 files changed, 669 insertions(+), 663 deletions(-) create mode 100644 mm2src/mm2_main/tests/mm2_tests/gui_storage_tests.rs diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index 299ae53c0d..fa238186a8 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -863,13 +863,9 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { async fn build(self) -> MmResult { let utxo = self.build_utxo_fields().await?; let utxo_arc = UtxoArc::new(utxo); - - #[cfg(target_arch = "wasm32")] let db_id = utxo_common::my_public_key(&utxo_arc) .ok() .map(|k| k.address_hash().to_string()); - #[cfg(not(target_arch = "wasm32"))] - let db_id: Option = None; let z_spending_key = match self.z_spending_key { Some(ref z_spending_key) => z_spending_key.clone(), @@ -1793,6 +1789,13 @@ impl MmCoin for ZCoin { fn on_disabled(&self) -> Result<(), AbortedError> { AbortableSystem::abort_all(&self.as_ref().abortable_system) } fn on_token_deactivated(&self, _ticker: &str) {} + + async fn account_db_id(&self) -> Option { + self.utxo_arc + .priv_key_policy + .activated_key() + .map(|activated_key| hex::encode(activated_key.public().address_hash().as_slice())) + } } #[async_trait] diff 
--git a/mm2src/mm2_gui_storage/src/lib.rs b/mm2src/mm2_gui_storage/src/lib.rs index 16535b3782..a158fe0dd6 100644 --- a/mm2src/mm2_gui_storage/src/lib.rs +++ b/mm2src/mm2_gui_storage/src/lib.rs @@ -1,3 +1,2 @@ pub(crate) mod account; pub(crate) mod context; -pub mod rpc_commands; diff --git a/mm2src/mm2_gui_storage/src/rpc_commands.rs b/mm2src/mm2_gui_storage/src/rpc_commands.rs index 2deb62be24..2d2d85263b 100644 --- a/mm2src/mm2_gui_storage/src/rpc_commands.rs +++ b/mm2src/mm2_gui_storage/src/rpc_commands.rs @@ -1,380 +1,380 @@ -// use crate::account::storage::AccountStorageError; -// use crate::account::{AccountId, AccountInfo, AccountWithCoins, AccountWithEnabledFlag, EnabledAccountId, -// MAX_ACCOUNT_DESCRIPTION_LENGTH, MAX_ACCOUNT_NAME_LENGTH, MAX_TICKER_LENGTH}; -// use crate::context::AccountContext; -// use common::{HttpStatusCode, StatusCode, SuccessResponse}; -// use derive_more::Display; -// use mm2_core::mm_ctx::MmArc; -// use mm2_err_handle::prelude::*; -// use mm2_number::BigDecimal; -// use ser_error_derive::SerializeErrorType; -// use serde::{Deserialize, Serialize}; -// use std::collections::BTreeSet; -// -// #[derive(Display, Serialize, SerializeErrorType)] -// #[serde(tag = "error_type", content = "error_data")] -// pub enum AccountRpcError { -// #[display(fmt = "Account name is too long, expected shorter or equal to {}", max_len)] -// NameTooLong { max_len: usize }, -// #[display(fmt = "Account description is too long, expected shorter or equal to {}", max_len)] -// DescriptionTooLong { max_len: usize }, -// #[display(fmt = "Coin ticker is too long, expected shorter or equal to {}", max_len)] -// TickerTooLong { max_len: usize }, -// #[display(fmt = "No such account {:?}", _0)] -// NoSuchAccount(AccountId), -// #[display(fmt = "No enabled account yet. Consider using 'enable_account' RPC")] -// NoEnabledAccount, -// #[display(fmt = "Account {:?} exists already", _0)] -// AccountExistsAlready(AccountId), -// #[display(fmt = "Error loading account: {}", _0)] -// ErrorLoadingAccount(String), -// #[display(fmt = "Error saving changes in accounts storage: {}", _0)] -// ErrorSavingAccount(String), -// #[display(fmt = "Internal error: {}", _0)] -// Internal(String), -// } -// -// impl From for AccountRpcError { -// fn from(e: AccountStorageError) -> Self { -// match e { -// AccountStorageError::NoSuchAccount(account_id) => AccountRpcError::NoSuchAccount(account_id), -// AccountStorageError::NoEnabledAccount => AccountRpcError::NoEnabledAccount, -// AccountStorageError::AccountExistsAlready(account_id) => AccountRpcError::AccountExistsAlready(account_id), -// AccountStorageError::ErrorDeserializing(e) | AccountStorageError::ErrorLoading(e) => { -// AccountRpcError::ErrorLoadingAccount(e) -// }, -// AccountStorageError::ErrorSaving(e) | AccountStorageError::ErrorSerializing(e) => { -// AccountRpcError::ErrorSavingAccount(e) -// }, -// AccountStorageError::Internal(internal) => AccountRpcError::Internal(internal), -// } -// } -// } -// -// impl HttpStatusCode for AccountRpcError { -// fn status_code(&self) -> StatusCode { -// match self { -// AccountRpcError::NameTooLong { .. } -// | AccountRpcError::DescriptionTooLong { .. } -// | AccountRpcError::TickerTooLong { .. 
} -// | AccountRpcError::NoSuchAccount(_) -// | AccountRpcError::NoEnabledAccount -// | AccountRpcError::AccountExistsAlready(_) => StatusCode::BAD_REQUEST, -// AccountRpcError::ErrorLoadingAccount(_) -// | AccountRpcError::ErrorSavingAccount(_) -// | AccountRpcError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR, -// } -// } -// } -// -// #[derive(Deserialize)] -// pub struct NewAccount { -// account_id: Id, -// name: String, -// #[serde(default)] -// description: String, -// #[serde(default)] -// balance_usd: BigDecimal, -// } -// -// impl From> for AccountInfo -// where -// AccountId: From, -// { -// fn from(orig: NewAccount) -> Self { -// AccountInfo { -// account_id: AccountId::from(orig.account_id), -// name: orig.name, -// description: orig.description, -// balance_usd: orig.balance_usd, -// } -// } -// } -// -// #[derive(Deserialize)] -// pub struct EnableAccountRequest { -// #[serde(flatten)] -// policy: EnableAccountPolicy, -// } -// -// #[derive(Deserialize)] -// #[serde(tag = "policy")] -// #[serde(rename_all = "snake_case")] -// pub enum EnableAccountPolicy { -// Existing(EnabledAccountId), -// New(NewAccount), -// } -// -// #[derive(Deserialize)] -// pub struct AddAccountRequest { -// #[serde(flatten)] -// account: NewAccount, -// } -// -// #[derive(Deserialize)] -// pub struct DeleteAccountRequest { -// account_id: AccountId, -// } -// -// #[derive(Deserialize)] -// pub struct SetAccountNameRequest { -// account_id: AccountId, -// name: String, -// } -// -// #[derive(Deserialize)] -// pub struct SetAccountDescriptionRequest { -// account_id: AccountId, -// description: String, -// } -// -// #[derive(Deserialize)] -// pub struct CoinRequest { -// account_id: AccountId, -// tickers: Vec, -// } -// -// #[derive(Deserialize)] -// pub struct GetAccountsRequest; -// -// #[derive(Deserialize)] -// pub struct GetAccountCoinsRequest { -// account_id: AccountId, -// } -// -// #[derive(Serialize)] -// pub struct GetAccountCoinsResponse { -// account_id: AccountId, -// coins: BTreeSet, -// } -// -// #[derive(Deserialize)] -// pub struct GetEnabledAccountRequest; -// -// #[derive(Deserialize)] -// pub struct SetBalanceRequest { -// account_id: AccountId, -// balance_usd: BigDecimal, -// } -// -// /// Sets the given account as an enabled (current active account). -// /// The behaviour depends on [`EnableAccountRequest::policy`]: -// /// * [`EnableAccountPolicy::Known`] => -// /// 1) Checks whether the given account exists in the storage. -// /// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the given `AccountId`; -// /// 2) Sets the account as an enabled. -// /// * [`EnableAccountPolicy::New`] => -// /// 1) Tries to upload the given account info to the storage. -// /// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already; -// /// 2) Sets the account as an enabled. -// /// -// /// # Important -// /// -// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. -// pub async fn enable_account(ctx: MmArc, req: EnableAccountRequest) -> MmResult { -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// let account_id = match req.policy { -// EnableAccountPolicy::Existing(account_id) => account_id, -// EnableAccountPolicy::New(new_account) => { -// let account_id = new_account.account_id; -// account_ctx -// .storage() -// .await? 
-// .upload_account(AccountInfo::from(new_account)) -// .await?; -// account_id -// } -// }; -// account_ctx.storage().await?.enable_account(account_id).await?; -// Ok(SuccessResponse::new()) -// } -// -// /// Adds the given [`AddAccountRequest::account`] to the storage. -// /// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already. -// /// -// /// # Important -// /// -// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. -// pub async fn add_account(ctx: MmArc, req: AddAccountRequest) -> MmResult { -// validate_new_account(&req.account)?; -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// account_ctx -// .storage() -// .await? -// .upload_account(AccountInfo::from(req.account)) -// .await?; -// Ok(SuccessResponse::new()) -// } -// -// /// Deletes the given [`AddAccountRequest::account_id`] account from the storage. -// /// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the same `AccountId`. -// /// -// /// # Important -// /// -// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. -// pub async fn delete_account(ctx: MmArc, req: DeleteAccountRequest) -> MmResult { -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// account_ctx.storage().await?.delete_account(req.account_id).await?; -// Ok(SuccessResponse::new()) -// } -// -// /// Loads accounts from the storage and marks one account as enabled **only**. -// /// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. -// /// -// /// # Note -// /// -// /// The returned accounts are sorted by `AccountId`. -// pub async fn get_accounts( -// ctx: MmArc, -// _req: GetAccountsRequest, -// ) -> MmResult, AccountRpcError> { -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// let accounts = account_ctx -// .storage() -// .await? -// .load_accounts_with_enabled_flag() -// .await? -// // The given `BTreeMap` accounts are sorted by `AccountId`. -// .into_values() -// .collect(); -// Ok(accounts) -// } -// -// /// Loads activated coins of the given `account_id` from the storage. -// /// -// /// # Note -// /// -// /// The returned coins are sorted. -// pub async fn get_account_coins( -// ctx: MmArc, -// req: GetAccountCoinsRequest, -// ) -> MmResult { -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// let coins = account_ctx -// .storage() -// .await? -// .load_account_coins(req.account_id.clone()) -// .await?; -// Ok(GetAccountCoinsResponse { -// account_id: req.account_id, -// coins, -// }) -// } -// -// /// Loads an enabled account with activated coins from the storage. -// /// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. -// /// -// /// # Note -// /// -// /// The account coins are sorted. -// pub async fn get_enabled_account( -// ctx: MmArc, -// _req: GetEnabledAccountRequest, -// ) -> MmResult { -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// let account = account_ctx.storage().await?.load_enabled_account_with_coins().await?; -// Ok(account) -// } -// -// /// Sets the account name. 
-// pub async fn set_account_name(ctx: MmArc, req: SetAccountNameRequest) -> MmResult { -// validate_account_name(&req.name)?; -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// account_ctx.storage().await?.set_name(req.account_id, req.name).await?; -// Ok(SuccessResponse::new()) -// } -// -// /// Sets the account description. -// pub async fn set_account_description( -// ctx: MmArc, -// req: SetAccountDescriptionRequest, -// ) -> MmResult { -// validate_account_desc(&req.description)?; -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// account_ctx -// .storage() -// .await? -// .set_description(req.account_id, req.description) -// .await?; -// Ok(SuccessResponse::new()) -// } -// -// /// Sets the account USD balance. -// /// -// /// # Important -// /// -// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. -// pub async fn set_account_balance(ctx: MmArc, req: SetBalanceRequest) -> MmResult { -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// account_ctx -// .storage() -// .await? -// .set_balance(req.account_id, req.balance_usd) -// .await?; -// Ok(SuccessResponse::new()) -// } -// -// /// Activates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. -// /// -// /// # Important -// /// -// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. -// pub async fn activate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { -// validate_tickers(&req.tickers)?; -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// account_ctx -// .storage() -// .await? -// .activate_coins(req.account_id, req.tickers) -// .await?; -// Ok(SuccessResponse::new()) -// } -// -// /// Deactivates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. -// /// -// /// # Important -// /// -// /// This RPC affects the storage **only**. It doesn't affect MarketMaker. -// pub async fn deactivate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { -// let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; -// account_ctx -// .storage() -// .await? 
-// .deactivate_coins(req.account_id, req.tickers) -// .await?; -// Ok(SuccessResponse::new()) -// } -// -// fn validate_new_account(account: &NewAccount) -> MmResult<(), AccountRpcError> { -// validate_account_name(&account.name)?; -// validate_account_desc(&account.description) -// } -// -// fn validate_account_name(name: &str) -> MmResult<(), AccountRpcError> { -// if name.len() > MAX_ACCOUNT_NAME_LENGTH { -// return MmError::err(AccountRpcError::NameTooLong { -// max_len: MAX_ACCOUNT_NAME_LENGTH, -// }); -// } -// Ok(()) -// } -// -// fn validate_account_desc(description: &str) -> MmResult<(), AccountRpcError> { -// if description.len() > MAX_ACCOUNT_DESCRIPTION_LENGTH { -// return MmError::err(AccountRpcError::DescriptionTooLong { -// max_len: MAX_ACCOUNT_NAME_LENGTH, -// }); -// } -// Ok(()) -// } -// -// fn validate_tickers(tickers: &[String]) -> MmResult<(), AccountRpcError> { -// for ticker in tickers { -// if ticker.len() > MAX_TICKER_LENGTH { -// return MmError::err(AccountRpcError::TickerTooLong { -// max_len: MAX_TICKER_LENGTH, -// }); -// } -// } -// Ok(()) -// } +use crate::account::storage::AccountStorageError; +use crate::account::{AccountId, AccountInfo, AccountWithCoins, AccountWithEnabledFlag, EnabledAccountId, + MAX_ACCOUNT_DESCRIPTION_LENGTH, MAX_ACCOUNT_NAME_LENGTH, MAX_TICKER_LENGTH}; +use crate::context::AccountContext; +use common::{HttpStatusCode, StatusCode, SuccessResponse}; +use derive_more::Display; +use mm2_core::mm_ctx::MmArc; +use mm2_err_handle::prelude::*; +use mm2_number::BigDecimal; +use ser_error_derive::SerializeErrorType; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeSet; + +#[derive(Display, Serialize, SerializeErrorType)] +#[serde(tag = "error_type", content = "error_data")] +pub enum AccountRpcError { + #[display(fmt = "Account name is too long, expected shorter or equal to {}", max_len)] + NameTooLong { max_len: usize }, + #[display(fmt = "Account description is too long, expected shorter or equal to {}", max_len)] + DescriptionTooLong { max_len: usize }, + #[display(fmt = "Coin ticker is too long, expected shorter or equal to {}", max_len)] + TickerTooLong { max_len: usize }, + #[display(fmt = "No such account {:?}", _0)] + NoSuchAccount(AccountId), + #[display(fmt = "No enabled account yet. 
Consider using 'enable_account' RPC")] + NoEnabledAccount, + #[display(fmt = "Account {:?} exists already", _0)] + AccountExistsAlready(AccountId), + #[display(fmt = "Error loading account: {}", _0)] + ErrorLoadingAccount(String), + #[display(fmt = "Error saving changes in accounts storage: {}", _0)] + ErrorSavingAccount(String), + #[display(fmt = "Internal error: {}", _0)] + Internal(String), +} + +impl From for AccountRpcError { + fn from(e: AccountStorageError) -> Self { + match e { + AccountStorageError::NoSuchAccount(account_id) => AccountRpcError::NoSuchAccount(account_id), + AccountStorageError::NoEnabledAccount => AccountRpcError::NoEnabledAccount, + AccountStorageError::AccountExistsAlready(account_id) => AccountRpcError::AccountExistsAlready(account_id), + AccountStorageError::ErrorDeserializing(e) | AccountStorageError::ErrorLoading(e) => { + AccountRpcError::ErrorLoadingAccount(e) + }, + AccountStorageError::ErrorSaving(e) | AccountStorageError::ErrorSerializing(e) => { + AccountRpcError::ErrorSavingAccount(e) + }, + AccountStorageError::Internal(internal) => AccountRpcError::Internal(internal), + } + } +} + +impl HttpStatusCode for AccountRpcError { + fn status_code(&self) -> StatusCode { + match self { + AccountRpcError::NameTooLong { .. } + | AccountRpcError::DescriptionTooLong { .. } + | AccountRpcError::TickerTooLong { .. } + | AccountRpcError::NoSuchAccount(_) + | AccountRpcError::NoEnabledAccount + | AccountRpcError::AccountExistsAlready(_) => StatusCode::BAD_REQUEST, + AccountRpcError::ErrorLoadingAccount(_) + | AccountRpcError::ErrorSavingAccount(_) + | AccountRpcError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR, + } + } +} + +#[derive(Deserialize)] +pub struct NewAccount { + account_id: Id, + name: String, + #[serde(default)] + description: String, + #[serde(default)] + balance_usd: BigDecimal, +} + +impl From> for AccountInfo +where + AccountId: From, +{ + fn from(orig: NewAccount) -> Self { + AccountInfo { + account_id: AccountId::from(orig.account_id), + name: orig.name, + description: orig.description, + balance_usd: orig.balance_usd, + } + } +} + +#[derive(Deserialize)] +pub struct EnableAccountRequest { + #[serde(flatten)] + policy: EnableAccountPolicy, +} + +#[derive(Deserialize)] +#[serde(tag = "policy")] +#[serde(rename_all = "snake_case")] +pub enum EnableAccountPolicy { + Existing(EnabledAccountId), + New(NewAccount), +} + +#[derive(Deserialize)] +pub struct AddAccountRequest { + #[serde(flatten)] + account: NewAccount, +} + +#[derive(Deserialize)] +pub struct DeleteAccountRequest { + account_id: AccountId, +} + +#[derive(Deserialize)] +pub struct SetAccountNameRequest { + account_id: AccountId, + name: String, +} + +#[derive(Deserialize)] +pub struct SetAccountDescriptionRequest { + account_id: AccountId, + description: String, +} + +#[derive(Deserialize)] +pub struct CoinRequest { + account_id: AccountId, + tickers: Vec, +} + +#[derive(Deserialize)] +pub struct GetAccountsRequest; + +#[derive(Deserialize)] +pub struct GetAccountCoinsRequest { + account_id: AccountId, +} + +#[derive(Serialize)] +pub struct GetAccountCoinsResponse { + account_id: AccountId, + coins: BTreeSet, +} + +#[derive(Deserialize)] +pub struct GetEnabledAccountRequest; + +#[derive(Deserialize)] +pub struct SetBalanceRequest { + account_id: AccountId, + balance_usd: BigDecimal, +} + +/// Sets the given account as an enabled (current active account). 
+/// The behaviour depends on [`EnableAccountRequest::policy`]: +/// * [`EnableAccountPolicy::Known`] => +/// 1) Checks whether the given account exists in the storage. +/// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the given `AccountId`; +/// 2) Sets the account as an enabled. +/// * [`EnableAccountPolicy::New`] => +/// 1) Tries to upload the given account info to the storage. +/// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already; +/// 2) Sets the account as an enabled. +/// +/// # Important +/// +/// This RPC affects the storage **only**. It doesn't affect MarketMaker. +pub async fn enable_account(ctx: MmArc, req: EnableAccountRequest) -> MmResult { + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + let account_id = match req.policy { + EnableAccountPolicy::Existing(account_id) => account_id, + EnableAccountPolicy::New(new_account) => { + let account_id = new_account.account_id; + account_ctx + .storage() + .await? + .upload_account(AccountInfo::from(new_account)) + .await?; + account_id + }, + }; + account_ctx.storage().await?.enable_account(account_id).await?; + Ok(SuccessResponse::new()) +} + +/// Adds the given [`AddAccountRequest::account`] to the storage. +/// Returns [`AccountRpcError::AccountExistsAlready`] if there is an account with the same `AccountId` already. +/// +/// # Important +/// +/// This RPC affects the storage **only**. It doesn't affect MarketMaker. +pub async fn add_account(ctx: MmArc, req: AddAccountRequest) -> MmResult { + validate_new_account(&req.account)?; + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + account_ctx + .storage() + .await? + .upload_account(AccountInfo::from(req.account)) + .await?; + Ok(SuccessResponse::new()) +} + +/// Deletes the given [`AddAccountRequest::account_id`] account from the storage. +/// Returns [`AccountRpcError::NoSuchAccount`] if there is no account with the same `AccountId`. +/// +/// # Important +/// +/// This RPC affects the storage **only**. It doesn't affect MarketMaker. +pub async fn delete_account(ctx: MmArc, req: DeleteAccountRequest) -> MmResult { + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + account_ctx.storage().await?.delete_account(req.account_id).await?; + Ok(SuccessResponse::new()) +} + +/// Loads accounts from the storage and marks one account as enabled **only**. +/// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. +/// +/// # Note +/// +/// The returned accounts are sorted by `AccountId`. +pub async fn get_accounts( + ctx: MmArc, + _req: GetAccountsRequest, +) -> MmResult, AccountRpcError> { + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + let accounts = account_ctx + .storage() + .await? + .load_accounts_with_enabled_flag() + .await? + // The given `BTreeMap` accounts are sorted by `AccountId`. + .into_values() + .collect(); + Ok(accounts) +} + +/// Loads activated coins of the given `account_id` from the storage. +/// +/// # Note +/// +/// The returned coins are sorted. +pub async fn get_account_coins( + ctx: MmArc, + req: GetAccountCoinsRequest, +) -> MmResult { + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + let coins = account_ctx + .storage() + .await? 
+ .load_account_coins(req.account_id.clone()) + .await?; + Ok(GetAccountCoinsResponse { + account_id: req.account_id, + coins, + }) +} + +/// Loads an enabled account with activated coins from the storage. +/// If no account has been enabled yet, this RPC returns [`AccountRpcError::NoEnabledAccount`] error. +/// +/// # Note +/// +/// The account coins are sorted. +pub async fn get_enabled_account( + ctx: MmArc, + _req: GetEnabledAccountRequest, +) -> MmResult { + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + let account = account_ctx.storage().await?.load_enabled_account_with_coins().await?; + Ok(account) +} + +/// Sets the account name. +pub async fn set_account_name(ctx: MmArc, req: SetAccountNameRequest) -> MmResult { + validate_account_name(&req.name)?; + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + account_ctx.storage().await?.set_name(req.account_id, req.name).await?; + Ok(SuccessResponse::new()) +} + +/// Sets the account description. +pub async fn set_account_description( + ctx: MmArc, + req: SetAccountDescriptionRequest, +) -> MmResult { + validate_account_desc(&req.description)?; + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + account_ctx + .storage() + .await? + .set_description(req.account_id, req.description) + .await?; + Ok(SuccessResponse::new()) +} + +/// Sets the account USD balance. +/// +/// # Important +/// +/// This RPC affects the storage **only**. It doesn't affect MarketMaker. +pub async fn set_account_balance(ctx: MmArc, req: SetBalanceRequest) -> MmResult { + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + account_ctx + .storage() + .await? + .set_balance(req.account_id, req.balance_usd) + .await?; + Ok(SuccessResponse::new()) +} + +/// Activates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. +/// +/// # Important +/// +/// This RPC affects the storage **only**. It doesn't affect MarketMaker. +pub async fn activate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { + validate_tickers(&req.tickers)?; + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + account_ctx + .storage() + .await? + .activate_coins(req.account_id, req.tickers) + .await?; + Ok(SuccessResponse::new()) +} + +/// Deactivates the given [`CoinRequest::tickers`] for the specified [`CoinRequest::account_id`] account. +/// +/// # Important +/// +/// This RPC affects the storage **only**. It doesn't affect MarketMaker. +pub async fn deactivate_coins(ctx: MmArc, req: CoinRequest) -> MmResult { + let account_ctx = AccountContext::from_ctx(&ctx, None).map_to_mm(AccountRpcError::Internal)?; + account_ctx + .storage() + .await? 
+ .deactivate_coins(req.account_id, req.tickers) + .await?; + Ok(SuccessResponse::new()) +} + +fn validate_new_account(account: &NewAccount) -> MmResult<(), AccountRpcError> { + validate_account_name(&account.name)?; + validate_account_desc(&account.description) +} + +fn validate_account_name(name: &str) -> MmResult<(), AccountRpcError> { + if name.len() > MAX_ACCOUNT_NAME_LENGTH { + return MmError::err(AccountRpcError::NameTooLong { + max_len: MAX_ACCOUNT_NAME_LENGTH, + }); + } + Ok(()) +} + +fn validate_account_desc(description: &str) -> MmResult<(), AccountRpcError> { + if description.len() > MAX_ACCOUNT_DESCRIPTION_LENGTH { + return MmError::err(AccountRpcError::DescriptionTooLong { + max_len: MAX_ACCOUNT_NAME_LENGTH, + }); + } + Ok(()) +} + +fn validate_tickers(tickers: &[String]) -> MmResult<(), AccountRpcError> { + for ticker in tickers { + if ticker.len() > MAX_TICKER_LENGTH { + return MmError::err(AccountRpcError::TickerTooLong { + max_len: MAX_TICKER_LENGTH, + }); + } + } + Ok(()) +} diff --git a/mm2src/mm2_main/src/rpc.rs b/mm2src/mm2_main/src/rpc.rs index 92422738f9..88181bd651 100644 --- a/mm2src/mm2_main/src/rpc.rs +++ b/mm2src/mm2_main/src/rpc.rs @@ -51,7 +51,7 @@ mod dispatcher_legacy; pub mod lp_commands_legacy; #[path = "rpc/rate_limiter.rs"] mod rate_limiter; -/// Lists the RPC method not requiring the "userpass" authentication. +/// Lists the RPC method not requiring the "userpass" authentication. /// None is also public to skip auth and display proper error in case of method is missing const PUBLIC_METHODS: &[Option<&str>] = &[ // Sorted alphanumerically (on the first letter) for readability. @@ -93,6 +93,8 @@ pub enum DispatcherError { UserpassIsInvalid(RateLimitError), #[display(fmt = "Error parsing mmrpc version: {}", _0)] InvalidMmRpcVersion(String), + #[display(fmt = "Not allowed rpc: {}", _0)] + DisabledNameSpace(String), } impl HttpStatusCode for DispatcherError { @@ -104,7 +106,8 @@ impl HttpStatusCode for DispatcherError { DispatcherError::LocalHostOnly | DispatcherError::UserpassIsNotSet | DispatcherError::UserpassIsInvalid(_) - | DispatcherError::Banned => StatusCode::FORBIDDEN, + | DispatcherError::Banned + | DispatcherError::DisabledNameSpace(_) => StatusCode::FORBIDDEN, } } } diff --git a/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs b/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs index 1075acacaf..433d4c8b9f 100644 --- a/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs +++ b/mm2src/mm2_main/src/rpc/dispatcher/dispatcher.rs @@ -149,10 +149,12 @@ async fn dispatcher_v2(request: MmRpcRequest, ctx: MmArc) -> DispatcherResult> = json::from_str(&resp.1).unwrap(); + let expected = vec![ + gui_storage::AccountWithEnabledFlag { + account_id: gui_storage::AccountId::Iguana, + name: "New Iguana account name".to_string(), + description: "Another description".to_string(), + balance_usd: BigDecimal::from(0i32), + enabled: true, + }, + gui_storage::AccountWithEnabledFlag { + account_id: gui_storage::AccountId::HW { + device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), + }, + name: "My HW".to_string(), + description: "Any description".to_string(), + balance_usd: BigDecimal::from(123567i32) / BigDecimal::from(1000i32), + enabled: false, + }, + ]; + assert_eq!(actual.result, expected); +} + +#[test] +#[cfg(not(target_arch = "wasm32"))] +fn test_gui_storage_coins_functionality() { + let passphrase = "test_gui_storage passphrase"; + + let conf = Mm2TestConf::seednode(passphrase, &json!([])); + let mm = block_on(MarketMakerIt::start_async(conf.conf, 
conf.rpc_password, None)).unwrap(); + let (_bob_dump_log, _bob_dump_dashboard) = mm.mm_dump(); + log!("Log path: {}", mm.log_path.display()); + + let resp = block_on(mm.rpc(&json!({ + "userpass": mm.userpass, + "mmrpc": "2.0", + "method": "gui_storage::enable_account", + "params": { + "policy": "new", + "account_id": { + "type": "iguana" + }, + "name": "My Iguana wallet", + }, + }))) + .unwrap(); + assert!(resp.0.is_success(), "!gui_storage::enable_account: {}", resp.1); + + let resp = block_on(mm.rpc(&json!({ + "userpass": mm.userpass, + "mmrpc": "2.0", + "method": "gui_storage::add_account", + "params": { + "account_id": { + "type": "hw", + "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" + }, + "description": "Any description", + "name": "My HW", + }, + }))) + .unwrap(); + assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); + + let resp = block_on(mm.rpc(&json!({ + "userpass": mm.userpass, + "mmrpc": "2.0", + "method": "gui_storage::activate_coins", + "params": { + "account_id": { + "type": "iguana" + }, + "tickers": ["RICK", "MORTY", "KMD"], + }, + }))) + .unwrap(); + assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); + + let resp = block_on(mm.rpc(&json!({ + "userpass": mm.userpass, + "mmrpc": "2.0", + "method": "gui_storage::activate_coins", + "params": { + "account_id": { + "type": "hw", + "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" + }, + "tickers": ["KMD", "MORTY", "BCH"], + }, + }))) + .unwrap(); + assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); + + let resp = block_on(mm.rpc(&json!({ + "userpass": mm.userpass, + "mmrpc": "2.0", + "method": "gui_storage::deactivate_coins", + "params": { + "account_id": { + "type": "hw", + "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" + }, + "tickers": ["BTC", "MORTY"], + }, + }))) + .unwrap(); + assert!(resp.0.is_success(), "!gui_storage::deactivate_coins: {}", resp.1); + + let resp = block_on(mm.rpc(&json!({ + "userpass": mm.userpass, + "mmrpc": "2.0", + "method": "gui_storage::get_enabled_account", + }))) + .unwrap(); + assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); + let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); + let expected = gui_storage::AccountWithCoins { + account_id: gui_storage::AccountId::Iguana, + name: "My Iguana wallet".to_string(), + description: String::new(), + balance_usd: BigDecimal::from(0i32), + coins: vec!["RICK".to_string(), "MORTY".to_string(), "KMD".to_string()] + .into_iter() + .collect(), + }; + assert_eq!(actual.result, expected); + + let resp = block_on(mm.rpc(&json!({ + "userpass": mm.userpass, + "mmrpc": "2.0", + "method": "gui_storage::get_account_coins", + "params": { + "account_id": { + "type": "hw", + "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" + } + } + }))) + .unwrap(); + assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); + let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); + let expected = gui_storage::AccountCoins { + account_id: gui_storage::AccountId::HW { + device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), + }, + coins: vec!["KMD".to_string(), "BCH".to_string()].into_iter().collect(), + }; + assert_eq!(actual.result, expected); +} diff --git a/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs b/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs index 2015abc7e5..89970b4f94 100644 --- a/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs +++ 
b/mm2src/mm2_main/tests/mm2_tests/mm2_tests_inner.rs @@ -5266,277 +5266,6 @@ fn test_no_login() { assert!(version.0.is_success(), "!version: {}", version.1); } -// #[test] -// #[cfg(not(target_arch = "wasm32"))] -// fn test_gui_storage_accounts_functionality() { -// let passphrase = "test_gui_storage passphrase"; - -// let conf = Mm2TestConf::seednode(passphrase, &json!([])); -// let mm = block_on(MarketMakerIt::start_async(conf.conf, conf.rpc_password, None)).unwrap(); -// let (_bob_dump_log, _bob_dump_dashboard) = mm.mm_dump(); -// log!("Log path: {}", mm.log_path.display()); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::enable_account", -// "params": { -// "policy": "new", -// "account_id": { -// "type": "iguana" -// }, -// "name": "My Iguana wallet", -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::enable_account: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::add_account", -// "params": { -// "account_id": { -// "type": "hw", -// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" -// }, -// "description": "Any description", -// "name": "My HW", -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); - -// // Add `HD{1}` account that will be deleted later. -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::add_account", -// "params": { -// "account_id": { -// "type": "hd", -// "account_idx": 1, -// }, -// "name": "An HD account" -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::delete_account", -// "params": { -// "account_id": { -// "type": "hd", -// "account_idx": 1, -// } -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::delete_account: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::set_account_balance", -// "params": { -// "account_id": { -// "type": "hw", -// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" -// }, -// "balance_usd": "123.567", -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::set_account_balance: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::set_account_name", -// "params": { -// "account_id": { -// "type": "iguana" -// }, -// "name": "New Iguana account name", -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::set_account_name: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::set_account_description", -// "params": { -// "account_id": { -// "type": "iguana" -// }, -// "description": "Another description", -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::set_account_description: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::get_accounts" -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::get_accounts: {}", resp.1); - -// let actual: RpcV2Response> = json::from_str(&resp.1).unwrap(); -// let expected = 
vec![ -// gui_storage::AccountWithEnabledFlag { -// account_id: gui_storage::AccountId::Iguana, -// name: "New Iguana account name".to_string(), -// description: "Another description".to_string(), -// balance_usd: BigDecimal::from(0i32), -// enabled: true, -// }, -// gui_storage::AccountWithEnabledFlag { -// account_id: gui_storage::AccountId::HW { -// device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), -// }, -// name: "My HW".to_string(), -// description: "Any description".to_string(), -// balance_usd: BigDecimal::from(123567i32) / BigDecimal::from(1000i32), -// enabled: false, -// }, -// ]; -// assert_eq!(actual.result, expected); -// } - -// #[test] -// #[cfg(not(target_arch = "wasm32"))] -// fn test_gui_storage_coins_functionality() { -// let passphrase = "test_gui_storage passphrase"; - -// let conf = Mm2TestConf::seednode(passphrase, &json!([])); -// let mm = block_on(MarketMakerIt::start_async(conf.conf, conf.rpc_password, None)).unwrap(); -// let (_bob_dump_log, _bob_dump_dashboard) = mm.mm_dump(); -// log!("Log path: {}", mm.log_path.display()); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::enable_account", -// "params": { -// "policy": "new", -// "account_id": { -// "type": "iguana" -// }, -// "name": "My Iguana wallet", -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::enable_account: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::add_account", -// "params": { -// "account_id": { -// "type": "hw", -// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" -// }, -// "description": "Any description", -// "name": "My HW", -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::add_account: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::activate_coins", -// "params": { -// "account_id": { -// "type": "iguana" -// }, -// "tickers": ["RICK", "MORTY", "KMD"], -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::activate_coins", -// "params": { -// "account_id": { -// "type": "hw", -// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" -// }, -// "tickers": ["KMD", "MORTY", "BCH"], -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::activate_coins: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::deactivate_coins", -// "params": { -// "account_id": { -// "type": "hw", -// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" -// }, -// "tickers": ["BTC", "MORTY"], -// }, -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::deactivate_coins: {}", resp.1); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::get_enabled_account", -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); -// let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); -// let expected = gui_storage::AccountWithCoins { -// account_id: gui_storage::AccountId::Iguana, -// name: "My Iguana wallet".to_string(), -// description: String::new(), -// balance_usd: 
BigDecimal::from(0i32), -// coins: vec!["RICK".to_string(), "MORTY".to_string(), "KMD".to_string()] -// .into_iter() -// .collect(), -// }; -// assert_eq!(actual.result, expected); - -// let resp = block_on(mm.rpc(&json!({ -// "userpass": mm.userpass, -// "mmrpc": "2.0", -// "method": "gui_storage::get_account_coins", -// "params": { -// "account_id": { -// "type": "hw", -// "device_pubkey": "1549128bbfb33b997949b4105b6a6371c998e212" -// } -// } -// }))) -// .unwrap(); -// assert!(resp.0.is_success(), "!gui_storage::get_enabled_account: {}", resp.1); -// let actual: RpcV2Response = json::from_str(&resp.1).unwrap(); -// let expected = gui_storage::AccountCoins { -// account_id: gui_storage::AccountId::HW { -// device_pubkey: "1549128bbfb33b997949b4105b6a6371c998e212".to_string(), -// }, -// coins: vec!["KMD".to_string(), "BCH".to_string()].into_iter().collect(), -// }; -// assert_eq!(actual.result, expected); -// } - #[test] #[cfg(not(target_arch = "wasm32"))] fn test_enable_btc_with_sync_starting_header() { diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 4a108cef9e..5e2e720410 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -2251,7 +2251,7 @@ pub async fn wait_for_swap_status(mm: &MarketMakerIt, uuid: &str, wait_sec: i64) if swap_status.is_ok() { break; } - println!("swap_statuss: {swap_status:?}"); + if get_utc_timestamp() > wait_until { panic!("Timed out waiting for swap {} status", uuid); } From e9ef70113a5e11e40d45664bd71c4ce48e482875 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 20 Jun 2024 16:53:43 +0100 Subject: [PATCH 153/186] use appropriate pubkey in UtxoCoinBuilder::build --- mm2src/coins/z_coin.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index fa238186a8..a2cae2dfc4 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -863,9 +863,10 @@ impl<'a> UtxoCoinBuilder for ZCoinBuilder<'a> { async fn build(self) -> MmResult { let utxo = self.build_utxo_fields().await?; let utxo_arc = UtxoArc::new(utxo); - let db_id = utxo_common::my_public_key(&utxo_arc) - .ok() - .map(|k| k.address_hash().to_string()); + let db_id = utxo_arc + .priv_key_policy + .activated_key() + .map(|activated_key| hex::encode(activated_key.public().address_hash().as_slice())); let z_spending_key = match self.z_spending_key { Some(ref z_spending_key) => z_spending_key.clone(), From 1b3931b9bccb52ffd3b20c7cde99eaaedd6a393c Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 24 Jun 2024 06:05:45 +0100 Subject: [PATCH 154/186] save shared_db_id as string and use lock --- mm2src/mm2_core/src/sql_connection_pool.rs | 68 +++++++++++----------- 1 file changed, 33 insertions(+), 35 deletions(-) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 3091b9d6b1..7b20242590 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -6,7 +6,6 @@ use db_common::sqlite::rusqlite::Connection; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::lock::Mutex as AsyncMutex; use gstuff::try_s; -use primitives::hash::H160; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::sync::{Arc, Mutex, MutexGuard, RwLock}; @@ -28,7 +27,7 @@ pub struct SqliteConnPool { // default db_id rmd160_hex: String, // default shared_db_id - shared_db_id: H160, + shared_db_id: String, db_root: Option, } 
@@ -44,14 +43,10 @@ impl SqliteConnPool { } /// Internal implementation to initialize a database connection. - fn init_impl(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { - let db_id_default = match db_id_conn_kind { - DbIdConnKind::Shared => hex::encode(ctx.shared_db_id().as_slice()), - DbIdConnKind::Single => ctx.rmd160_hex(), - }; - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); + fn init_impl(ctx: &MmCtx, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { + let db_id = Self::db_id_from_ctx(ctx, db_id, &kind); - let sqlite_file_path = match db_id_conn_kind { + let sqlite_file_path = match kind { DbIdConnKind::Shared => ctx.shared_dbdir(Some(&db_id)).join(SQLITE_SHARED_DB_ID), DbIdConnKind::Single => ctx.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID), }; @@ -72,7 +67,7 @@ impl SqliteConnPool { try_s!(ctx.sqlite_conn_pool.pin(Self { connections, rmd160_hex: ctx.rmd160_hex(), - shared_db_id: *ctx.shared_db_id(), + shared_db_id: hex::encode(*ctx.shared_db_id()), db_root: db_root.map(|d| d.to_owned()) })); @@ -86,13 +81,8 @@ impl SqliteConnPool { pub fn init_shared_test(ctx: &MmCtx) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Shared) } /// Internal test implementation to initialize a database connection in-memory. - fn init_impl_test(ctx: &MmCtx, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Result<(), String> { - let db_id_default = match db_id_conn_kind { - DbIdConnKind::Shared => hex::encode(ctx.shared_db_id().as_slice()), - DbIdConnKind::Single => ctx.rmd160_hex(), - }; - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); - + fn init_impl_test(ctx: &MmCtx, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { + let db_id = Self::db_id_from_ctx(ctx, db_id, &kind); if let Some(pool) = ctx.sqlite_conn_pool.as_option() { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); let mut pool = pool.connections.write().unwrap(); @@ -107,7 +97,7 @@ impl SqliteConnPool { try_s!(ctx.sqlite_conn_pool.pin(Self { connections, rmd160_hex: ctx.rmd160_hex(), - shared_db_id: *ctx.shared_db_id(), + shared_db_id: hex::encode(*ctx.shared_db_id()), db_root: db_root.map(|d| d.to_owned()) })); @@ -125,13 +115,8 @@ impl SqliteConnPool { } /// Internal implementation to retrieve or create a connection. - fn sqlite_conn_impl(&self, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind) -> Arc> { - let db_id_default = match db_id_conn_kind { - DbIdConnKind::Shared => hex::encode(self.shared_db_id.as_slice()), - DbIdConnKind::Single => self.rmd160_hex.clone(), - }; - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); - + fn sqlite_conn_impl(&self, db_id: Option<&str>, kind: DbIdConnKind) -> Arc> { + let db_id = self.db_id(db_id, &kind); let connections = self.connections.read().unwrap(); if let Some(connection) = connections.get(&db_id) { return Arc::clone(connection); @@ -139,7 +124,7 @@ impl SqliteConnPool { drop(connections); let mut connections = self.connections.write().unwrap(); - let sqlite_file_path = self.db_dir(&db_id).join(match db_id_conn_kind { + let sqlite_file_path = self.db_dir(&db_id).join(match kind { DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, DbIdConnKind::Single => SYNC_SQLITE_DB_ID, }); @@ -168,17 +153,12 @@ impl SqliteConnPool { } /// Internal run a sql query. 
- fn run_sql_query_impl(&self, db_id: Option<&str>, db_id_conn_kind: DbIdConnKind, f: F) -> R + fn run_sql_query_impl(&self, db_id: Option<&str>, kind: DbIdConnKind, f: F) -> R where F: FnOnce(MutexGuard) -> R + Send + 'static, R: Send + 'static, { - let db_id_default = match db_id_conn_kind { - DbIdConnKind::Shared => hex::encode(self.shared_db_id.as_slice()), - DbIdConnKind::Single => self.rmd160_hex.clone(), - }; - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| db_id_default); - + let db_id = self.db_id(db_id, &kind); let connections = self.connections.read().unwrap(); if let Some(connection) = connections.get(&db_id) { let conn = connection.lock().unwrap(); @@ -186,7 +166,7 @@ impl SqliteConnPool { } drop(connections); - let sqlite_file_path = self.db_dir(&db_id).join(match db_id_conn_kind { + let sqlite_file_path = self.db_dir(&db_id).join(match kind { DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, DbIdConnKind::Single => SYNC_SQLITE_DB_ID, }); @@ -194,7 +174,7 @@ impl SqliteConnPool { let connection = Self::open_connection(sqlite_file_path); connections.insert(db_id, Arc::clone(&connection)); - f(connection.try_lock().unwrap()) + f(connection.lock().unwrap()) } pub fn add_test_db(&self, db_id: String) { @@ -218,6 +198,24 @@ impl SqliteConnPool { } fn db_dir(&self, db_id: &str) -> PathBuf { path_to_dbdir(self.db_root.as_deref(), db_id) } + fn db_id(&self, db_id: Option<&str>, kind: &DbIdConnKind) -> String { + match kind { + DbIdConnKind::Shared => db_id + .map(|e| e.to_owned()) + .unwrap_or_else(|| self.shared_db_id.to_owned()), + DbIdConnKind::Single => db_id + .map(|e| e.to_owned()) + .unwrap_or_else(|| self.rmd160_hex.to_owned()), + } + } + fn db_id_from_ctx(ctx: &MmCtx, db_id: Option<&str>, kind: &DbIdConnKind) -> String { + match kind { + DbIdConnKind::Shared => db_id + .map(|e| e.to_owned()) + .unwrap_or_else(|| hex::encode(ctx.shared_db_id().as_slice())), + DbIdConnKind::Single => db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()), + } + } } /// A pool for managing async SQLite connections, where each connection is keyed by a unique string identifier. From 4962351401ef527bb3167cf3f1f5b27f5d64db6e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 24 Jun 2024 06:13:10 +0100 Subject: [PATCH 155/186] use path_to_db_root in path_to_dbdir --- mm2src/mm2_core/src/mm_ctx.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index ea4112cd2f..352343c827 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -433,16 +433,7 @@ fn path_to_db_root(db_root: Option<&str>) -> PathBuf { /// This function can be used later by an FFI function to open a GUI storage. #[cfg(not(target_arch = "wasm32"))] -pub fn path_to_dbdir(db_root: Option<&str>, db_id: &str) -> PathBuf { - const DEFAULT_ROOT: &str = "DB"; - - let path = match db_root { - Some(dbdir) if !dbdir.is_empty() => Path::new(dbdir), - _ => Path::new(DEFAULT_ROOT), - }; - - path.join(db_id) -} +pub fn path_to_dbdir(db_root: Option<&str>, db_id: &str) -> PathBuf { path_to_db_root(db_root).join(db_id) } // We don't want to send `MmCtx` across threads, it will only obstruct the normal use case // (and might result in undefined behaviour if there's a C struct or value in the context that is aliased from the various MM threads). 
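For reference, below is a minimal, standalone sketch (not part of any patch in this series) of the per-account DB path layout these commits converge on: `path_to_dbdir` now only joins a `db_id` (the hex-encoded RMD160 of the activated public key) onto the root resolved by `path_to_db_root`. The body of `path_to_db_root` is not shown in this series, so the version here assumes it keeps the default-"DB"-root logic that was previously inlined in `path_to_dbdir` above; the example `db_id` is the device pubkey hash used in the gui_storage tests and is purely illustrative.

use std::path::{Path, PathBuf};

/// Default DB root used when the "dbdir" config value is absent or empty
/// (assumed to mirror the DEFAULT_ROOT constant removed from path_to_dbdir above).
const DEFAULT_ROOT: &str = "DB";

/// Resolves the DB root directory from an optional "dbdir" config value.
fn path_to_db_root(db_root: Option<&str>) -> PathBuf {
    match db_root {
        Some(dbdir) if !dbdir.is_empty() => PathBuf::from(dbdir),
        _ => PathBuf::from(DEFAULT_ROOT),
    }
}

/// Per-account DB directory: <db root>/<db_id>, where db_id is the
/// hex-encoded RMD160 of the activated public key.
fn path_to_dbdir(db_root: Option<&str>, db_id: &str) -> PathBuf {
    path_to_db_root(db_root).join(db_id)
}

fn main() {
    // Hypothetical db_id, borrowed from the test fixtures earlier in this series.
    let db_id = "1549128bbfb33b997949b4105b6a6371c998e212";
    assert_eq!(path_to_dbdir(None, db_id), Path::new("DB").join(db_id));
    assert_eq!(
        path_to_dbdir(Some("/var/kdf-db"), db_id),
        Path::new("/var/kdf-db").join(db_id)
    );
    println!("{}", path_to_dbdir(None, db_id).display());
}

Under this layout every activated account gets its own subdirectory beneath the shared root (e.g. DB/<rmd160-hex>/<db file>), which is what the SqliteConnPool and db-migration-watcher changes in the surrounding patches key their connections and migrations on.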
From f449cdd28c88209b162b60199e32031d6d9d146a Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 24 Jun 2024 06:21:20 +0100 Subject: [PATCH 156/186] make getter fns non async --- mm2src/coins/eth/v2_activation.rs | 3 +-- mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs | 3 +-- .../coins_activation/src/tendermint_with_assets_activation.rs | 3 +-- mm2src/mm2_core/src/sql_connection_pool.rs | 4 ++-- mm2src/mm2_main/src/lp_native_dex.rs | 2 +- 5 files changed, 6 insertions(+), 9 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 389ac015ca..b3f4a6bd1a 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -942,8 +942,7 @@ async fn run_db_migraiton_for_new_eth_pubkey(ctx: &MmArc, pubkey: &KeyPair) -> M .db_migration_watcher .as_option() .expect("Db migration watcher isn't intialized yet!") - .get_sender() - .await; + .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender .send(DbIds { diff --git a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs index deef3bf83d..bc308a8c4e 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs @@ -722,8 +722,7 @@ async fn run_db_migraiton_for_new_eth_pubkey(ctx: &MmArc, pubkey: H160) -> MmRes .db_migration_watcher .as_option() .expect("Db migration watcher isn't intialized yet!") - .get_sender() - .await; + .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender .send(DbIds { diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index a9b91db97f..6f1f12028a 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -412,8 +412,7 @@ async fn run_db_migraiton_for_new_tendermint_pubkey( .db_migration_watcher .as_option() .expect("Db migration watcher isn't intialized yet!") - .get_sender() - .await; + .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender .send(DbIds { diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 7b20242590..5dccd84ee1 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -359,7 +359,7 @@ impl DbMigrationWatcher { true } - pub async fn get_receiver(&self) -> DbMigrationHandler { self.receiver.clone() } + pub fn get_receiver(&self) -> DbMigrationHandler { self.receiver.clone() } - pub async fn get_sender(&self) -> DbMigrationSender { self.sender.clone() } + pub fn get_sender(&self) -> DbMigrationSender { self.sender.clone() } } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 802b700f68..65c04a44ad 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -462,7 +462,7 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { .expect("db_migration_watcher initialization failed"); let watcher_clone = db_migration_watcher.clone(); - let receiver = db_migration_watcher.get_receiver().await; + let receiver = db_migration_watcher.get_receiver(); let mut guard = receiver.lock().await; while let Some(ids) = guard.next().await { From a4ac369cec610ba22a600260f561ea9ccf52b6e7 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 24 Jun 2024 
11:05:41 +0100 Subject: [PATCH 157/186] fix other review notes --- mm2src/mm2_core/src/sql_connection_pool.rs | 23 +-------------------- mm2src/mm2_main/src/lp_native_dex.rs | 12 ++++++++--- mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs | 22 +++++++------------- 3 files changed, 17 insertions(+), 40 deletions(-) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 5dccd84ee1..f831110006 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -6,7 +6,7 @@ use db_common::sqlite::rusqlite::Connection; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::lock::Mutex as AsyncMutex; use gstuff::try_s; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::path::PathBuf; use std::sync::{Arc, Mutex, MutexGuard, RwLock}; @@ -321,7 +321,6 @@ pub type DbMigrationHandler = Arc>>; pub type DbMigrationSender = Arc>>; pub struct DbMigrationWatcher { - migrations: Arc>>, sender: DbMigrationSender, receiver: DbMigrationHandler, } @@ -331,7 +330,6 @@ impl DbMigrationWatcher { let (sender, receiver) = channel(1); let selfi = Arc::new(Self { - migrations: Default::default(), sender: Arc::new(AsyncMutex::new(sender)), receiver: Arc::new(AsyncMutex::new(receiver)), }); @@ -340,25 +338,6 @@ impl DbMigrationWatcher { Ok(selfi) } - /// This function verifies if a migration has already been executed for the provided - /// db_id. If the migration has not been run for the given db_id, it adds - /// the `db_id` to the list of migrated databases and returns `false`. If no db_id is provided, - /// it assumes that the migration has been run for the default db_id. - pub async fn check_db_id_is_migrated(&self, db_id: Option<&str>) -> bool { - if let Some(db_id) = db_id { - let mut guard = self.migrations.lock().await; - if guard.get(db_id).is_some() { - // migration has been ran for db with id - return true; - }; - // migration hasn't been ran for db with this id - guard.insert(db_id.to_owned()); - return false; - } - // migration has been ran when no db id is provided we assume it's the default db id - true - } - pub fn get_receiver(&self) -> DbMigrationHandler { self.receiver.clone() } pub fn get_sender(&self) -> DbMigrationSender { self.sender.clone() } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 65c04a44ad..c53f6f3315 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -456,25 +456,31 @@ fn init_wasm_event_streaming(ctx: &MmArc) { #[cfg(not(target_arch = "wasm32"))] async fn init_db_migration_watcher_loop(ctx: MmArc) { + use std::collections::HashSet; + + let mut migrations = HashSet::new(); let db_migration_watcher = &ctx .init_db_migration_watcher() .await - .expect("db_migration_watcher initialization failed"); + .expect("db_m igration_watcher initialization failed"); - let watcher_clone = db_migration_watcher.clone(); let receiver = db_migration_watcher.get_receiver(); let mut guard = receiver.lock().await; while let Some(ids) = guard.next().await { - if watcher_clone.check_db_id_is_migrated(Some(&ids.db_id)).await { + if migrations.contains(&ids.db_id) { debug!("{} migrated, skipping migration..", ids.db_id); continue; } + // run db migration for db_id if new activated pubkey is unique. 
if let Err(err) = run_db_migration_impl(&ctx, Some(&ids.db_id), Some(&ids.shared_db_id)).await { error!("{err:?}"); continue; }; + + // insert new db_id to migration list + migrations.insert(ids.db_id.clone()); // Fetch and extend ctx.coins_needed_for_kick_start from new intialized db. if let Err(err) = kick_start(ctx.clone(), Some(&ids.db_id)).await { error!("{err:?}"); diff --git a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs index b9eb5f36d9..10da61f30e 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_v2_rpcs.rs @@ -22,7 +22,7 @@ cfg_native!( use crate::mm2::database::my_swaps::SELECT_MY_SWAP_V2_FOR_RPC_BY_UUID; use common::async_blocking; use db_common::sqlite::query_single_row; - use db_common::sqlite::rusqlite::{Result as SqlResult, Connection, Row, Error as SqlError}; + use db_common::sqlite::rusqlite::{Result as SqlResult, Row, Error as SqlError}; use db_common::sqlite::rusqlite::types::Type as SqlType; ); @@ -167,19 +167,6 @@ pub(super) async fn get_taker_swap_data_for_rpc( get_swap_data_for_rpc_impl(ctx, uuid, db_id).await } -#[cfg(not(target_arch = "wasm32"))] -fn get_swap_data_for_rpc_impl_inner( - conn: &Connection, - uuid: String, -) -> SqlResult>> { - query_single_row( - conn, - SELECT_MY_SWAP_V2_FOR_RPC_BY_UUID, - &[(":uuid", uuid.as_str())], - MySwapForRpc::from_row, - ) -} - #[cfg(not(target_arch = "wasm32"))] async fn get_swap_data_for_rpc_impl( ctx: &MmArc, @@ -192,7 +179,12 @@ async fn get_swap_data_for_rpc_impl( async_blocking(move || { Ok(ctx.run_sql_query(db_id.as_deref(), move |conn| { - get_swap_data_for_rpc_impl_inner(&conn, uuid) + query_single_row( + &conn, + SELECT_MY_SWAP_V2_FOR_RPC_BY_UUID, + &[(":uuid", uuid.as_str())], + MySwapForRpc::from_row, + ) })?) }) .await From 99e6ca6492fdc71b513399a2af28e885e3d5a26e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 24 Jun 2024 11:39:22 +0100 Subject: [PATCH 158/186] improve migration watcher -handling --- mm2src/mm2_core/src/mm_ctx.rs | 4 ++-- mm2src/mm2_core/src/sql_connection_pool.rs | 10 +++------- mm2src/mm2_main/src/lp_native_dex.rs | 4 +--- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 352343c827..f9790bb1cf 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -29,7 +29,7 @@ cfg_wasm32! { cfg_native! 
{ use db_common::sqlite::rusqlite::Connection; - use crate::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool, DbMigrationWatcher}; + use crate::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool, DbMigrationWatcher, DbMigrationHandler}; use rustls::ServerName; use mm2_metrics::prometheus; use mm2_metrics::MmMetricsError; @@ -388,7 +388,7 @@ impl MmCtx { } #[cfg(not(target_arch = "wasm32"))] - pub async fn init_db_migration_watcher(&self) -> Result, String> { + pub async fn init_db_migration_watcher(&self) -> Result { DbMigrationWatcher::init(self).await } } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index f831110006..6a50382090 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -322,23 +322,19 @@ pub type DbMigrationSender = Arc>>; pub struct DbMigrationWatcher { sender: DbMigrationSender, - receiver: DbMigrationHandler, } impl DbMigrationWatcher { - pub async fn init(ctx: &MmCtx) -> Result, String> { + pub async fn init(ctx: &MmCtx) -> Result { let (sender, receiver) = channel(1); let selfi = Arc::new(Self { sender: Arc::new(AsyncMutex::new(sender)), - receiver: Arc::new(AsyncMutex::new(receiver)), }); - try_s!(ctx.db_migration_watcher.pin(selfi.clone())); + try_s!(ctx.db_migration_watcher.pin(selfi)); - Ok(selfi) + Ok(Arc::new(AsyncMutex::new(receiver))) } - pub fn get_receiver(&self) -> DbMigrationHandler { self.receiver.clone() } - pub fn get_sender(&self) -> DbMigrationSender { self.sender.clone() } } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index c53f6f3315..d4b4e496cc 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -459,12 +459,10 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { use std::collections::HashSet; let mut migrations = HashSet::new(); - let db_migration_watcher = &ctx + let receiver = &ctx .init_db_migration_watcher() .await .expect("db_m igration_watcher initialization failed"); - - let receiver = db_migration_watcher.get_receiver(); let mut guard = receiver.lock().await; while let Some(ids) = guard.next().await { From a94c791ba0ae7b3961bebf54877d574f5831bd03 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 25 Jun 2024 12:20:51 +0100 Subject: [PATCH 159/186] fix TendermintKeyPair derivation and minor fixes --- mm2src/coins/tendermint/tendermint_coin.rs | 11 ++++------- mm2src/mm2_core/src/mm_ctx.rs | 4 +--- mm2src/mm2_core/src/sql_connection_pool.rs | 2 +- mm2src/mm2_main/src/lp_native_dex.rs | 1 - 4 files changed, 6 insertions(+), 12 deletions(-) diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 1da1ab6a91..1fd056104f 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2976,14 +2976,11 @@ pub fn tendermint_priv_key_policy( kind: TendermintInitErrorKind::InvalidPrivKey(e.to_string()), })?; let bip39_secp_priv_key = global_hd.root_priv_key().clone(); - let pubkey = Public::from_slice(&bip39_secp_priv_key.public_key().to_bytes()).map_to_mm(|e| { - TendermintInitError { - ticker: ticker.to_string(), - kind: TendermintInitErrorKind::Internal(e.to_string()), - } + let keypair = key_pair_from_secret(activated_priv_key.as_ref()).mm_err(|e| TendermintInitError { + ticker: ticker.to_string(), + kind: TendermintInitErrorKind::Internal(e.to_string()), })?; - - let tendermint_pair = TendermintKeyPair::new(activated_priv_key, pubkey); 
+ let tendermint_pair = TendermintKeyPair::new(activated_priv_key, *keypair.public()); Ok(TendermintPrivKeyPolicy::HDWallet { path_to_coin: path_to_coin.clone(), diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index f9790bb1cf..956b835742 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -388,9 +388,7 @@ impl MmCtx { } #[cfg(not(target_arch = "wasm32"))] - pub async fn init_db_migration_watcher(&self) -> Result { - DbMigrationWatcher::init(self).await - } + pub fn init_db_migration_watcher(&self) -> Result { DbMigrationWatcher::init(self) } } impl Default for MmCtx { diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 6a50382090..4d0c76cc40 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -325,7 +325,7 @@ pub struct DbMigrationWatcher { } impl DbMigrationWatcher { - pub async fn init(ctx: &MmCtx) -> Result { + pub fn init(ctx: &MmCtx) -> Result { let (sender, receiver) = channel(1); let selfi = Arc::new(Self { diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index d4b4e496cc..b7c159d008 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -461,7 +461,6 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { let mut migrations = HashSet::new(); let receiver = &ctx .init_db_migration_watcher() - .await .expect("db_m igration_watcher initialization failed"); let mut guard = receiver.lock().await; From 33ba505c45ea283036e5df9651d0abaadbc2c3a5 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 26 Jun 2024 10:30:34 +0100 Subject: [PATCH 160/186] hd wallet and some review notes fixes --- mm2src/coins/eth/v2_activation.rs | 10 +++--- mm2src/coins/tendermint/tendermint_coin.rs | 10 +++++- .../utxo/utxo_builder/utxo_arc_builder.rs | 12 +++---- .../src/platform_coin_with_tokens.rs | 2 +- .../standalone_coin/init_standalone_coin.rs | 6 ++-- .../src/tendermint_with_assets_activation.rs | 33 ++++++++++++++++--- mm2src/mm2_core/src/mm_ctx.rs | 1 - mm2src/mm2_core/src/sql_connection_pool.rs | 24 +++++--------- mm2src/mm2_main/src/lp_native_dex.rs | 7 ---- 9 files changed, 60 insertions(+), 45 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index b3f4a6bd1a..fe111fe672 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -670,6 +670,9 @@ pub(crate) async fn build_address_and_priv_key_policy( .mm_err(|e| EthActivationV2Error::InternalError(e.to_string()))?; let activated_key = KeyPair::from_secret_slice(raw_priv_key.as_slice()) .map_to_mm(|e| EthActivationV2Error::InternalError(e.to_string()))?; + #[cfg(not(target_arch = "wasm32"))] + run_db_migraiton_for_new_eth_pubkey(ctx, &activated_key).await?; + let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); let hd_wallet_rmd160 = dhash160(activated_key.public().as_bytes()); @@ -687,9 +690,6 @@ pub(crate) async fn build_address_and_priv_key_policy( gap_limit, }; - #[cfg(not(target_arch = "wasm32"))] - run_db_migraiton_for_new_eth_pubkey(ctx, &activated_key).await?; - let derivation_method = DerivationMethod::HDWallet(hd_wallet); Ok(( EthPrivKeyPolicy::HDWallet { @@ -932,8 +932,8 @@ fn compress_public_key(uncompressed: H520) -> MmResult MmResult<(), EthActivationV2Error> { - let db_id = hex::encode(pubkey.address().to_bytes()); +async fn run_db_migraiton_for_new_eth_pubkey(ctx: &MmArc, keypair: &KeyPair) -> 
MmResult<(), EthActivationV2Error> { + let db_id = hex::encode(dhash160(keypair.public().as_bytes())); let shared_db_id = shared_db_id_from_seed(&db_id) .mm_err(|err| EthActivationV2Error::InternalError(err.to_string()))? .to_string(); diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 1fd056104f..6c8ac8dd30 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -230,7 +230,7 @@ impl TendermintActivationPolicy { } } - fn public_key(&self) -> Result { + pub fn public_key(&self) -> Result { match self { Self::PrivateKey(private_key_policy) => match private_key_policy { PrivKeyPolicy::Iguana(pair) => PublicKey::from_raw_secp256k1(&pair.public_key.to_bytes()) @@ -2318,6 +2318,14 @@ impl MmCoin for TendermintCoin { None } + + async fn tx_history_db_id(&self) -> Option { + self.activation_policy + .public_key() + .ok() + .map(|k| hex::encode(dhash160(&k.to_bytes()))) + .or(self.account_db_id().await) // Fallback to the account db_id for non-HD wallets + } } #[async_trait] diff --git a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs index bc308a8c4e..301805d3a1 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs @@ -8,7 +8,6 @@ use crate::utxo::{generate_and_send_tx, FeePolicy, GetUtxoListOps, UtxoArc, Utxo UtxoWeak}; use crate::{DerivationMethod, PrivKeyBuildPolicy, UtxoActivationParams}; use async_trait::async_trait; -#[cfg(not(target_arch = "wasm32"))] use bitcrypto::dhash160; use chain::{BlockHeader, TransactionOutput}; use common::executor::{AbortSettings, SpawnAbortable, Timer}; use common::log::{debug, error, info, warn}; @@ -134,17 +133,16 @@ where } if let EventInitStatus::Failed(err) = - EventBehaviour::spawn_if_active(UtxoStandardCoin::from(utxo_arc), stream_config).await + EventBehaviour::spawn_if_active(UtxoStandardCoin::from(utxo_arc.clone()), stream_config).await { return MmError::err(UtxoCoinBuildError::FailedSpawningBalanceEvents(err)); } } #[cfg(not(target_arch = "wasm32"))] - if let PrivKeyBuildPolicy::GlobalHDAccount(hd) = self.priv_key_policy() { - let rmd = dhash160(&hd.root_priv_key().public_key().to_bytes()); - run_db_migraiton_for_new_eth_pubkey(self.ctx, rmd).await? - }; + if let Some(hd) = utxo_arc.derivation_method.hd_wallet() { + run_db_migraiton_for_new_utxo_pubkey(self.ctx, hd.inner.hd_wallet_rmd160).await? + } Ok(result_coin) } @@ -712,7 +710,7 @@ fn spawn_block_header_utxo_loop( } #[cfg(not(target_arch = "wasm32"))] -async fn run_db_migraiton_for_new_eth_pubkey(ctx: &MmArc, pubkey: H160) -> MmResult<(), UtxoCoinBuildError> { +async fn run_db_migraiton_for_new_utxo_pubkey(ctx: &MmArc, pubkey: H160) -> MmResult<(), UtxoCoinBuildError> { let db_id = hex::encode(pubkey.as_slice()); let shared_db_id = shared_db_id_from_seed(&db_id) .mm_err(|err| UtxoCoinBuildError::Internal(err.to_string()))? 
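The hunks above (ETH activation, Tendermint `tx_history_db_id`, UTXO builder) all derive the per-account database id the same way: hash the activated public key with `dhash160` (RIPEMD-160 over SHA-256), hex-encode the 20-byte result, and feed that id into `shared_db_id_from_seed` and the migration helpers. A minimal sketch of that derivation, assuming only the `bitcrypto::dhash160` and `hex` helpers these modules already import; the helper name below is hypothetical, the real call sites inline the expression:

    use bitcrypto::dhash160;

    /// Hypothetical helper mirroring the inline expressions in the hunks above:
    /// db_id = hex(RIPEMD160(SHA256(pubkey_bytes))), a 40-char lowercase hex string.
    fn account_db_id_from_pubkey(pubkey_bytes: &[u8]) -> String {
        hex::encode(dhash160(pubkey_bytes))
    }

    // Illustrative usage, matching the ETH and UTXO call sites above:
    // let db_id = account_db_id_from_pubkey(activated_key.public().as_bytes());
    // let shared_db_id = shared_db_id_from_seed(&db_id)?.to_string(); // error mapping elided

Hashing the pubkey rather than an address keeps the id stable across address formats and matches the rmd160-style id already used for the default database directory.
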
diff --git a/mm2src/coins_activation/src/platform_coin_with_tokens.rs b/mm2src/coins_activation/src/platform_coin_with_tokens.rs index 0b97403dce..3319381cc2 100644 --- a/mm2src/coins_activation/src/platform_coin_with_tokens.rs +++ b/mm2src/coins_activation/src/platform_coin_with_tokens.rs @@ -476,7 +476,7 @@ where if req.request.tx_history() { platform_coin.start_history_background_fetching( ctx.clone(), - TxHistoryStorageBuilder::new(&ctx, platform_coin.clone().into().account_db_id().await).build()?, + TxHistoryStorageBuilder::new(&ctx, platform_coin.clone().into().tx_history_db_id().await).build()?, activation_result.get_platform_balance(), ); } diff --git a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs index 1ac6b83d06..40f8f8475d 100644 --- a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs +++ b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs @@ -6,7 +6,7 @@ use crate::standalone_coin::init_standalone_coin_error::{CancelInitStandaloneCoi use async_trait::async_trait; use coins::my_tx_history_v2::TxHistoryStorage; use coins::tx_history_storage::{CreateTxHistoryStorageError, TxHistoryStorageBuilder}; -use coins::{lp_coinfind, lp_register_coin, CoinsContext, MmCoinEnum, RegisterCoinError, RegisterCoinParams}; +use coins::{lp_coinfind, lp_register_coin, CoinsContext, MmCoin, MmCoinEnum, RegisterCoinError, RegisterCoinParams}; use common::{log, SuccessResponse}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; @@ -31,7 +31,7 @@ pub struct InitStandaloneCoinReq { } #[async_trait] -pub trait InitStandaloneCoinActivationOps: Into + Send + Sync + 'static { +pub trait InitStandaloneCoinActivationOps: Into + MmCoin + Send + Sync + 'static { type ActivationRequest: TxHistory + Clone + Send + Sync; type StandaloneProtocol: TryFromCoinProtocol + Clone + Send + Sync; // The following types are related to `RpcTask` management. 
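The `MmCoin` supertrait added to `InitStandaloneCoinActivationOps` above is what lets the generic init code in the next hunk call `tx_history_db_id()` on the concrete coin directly, without first converting it into `MmCoinEnum`. A minimal, synchronous sketch of that supertrait pattern with hypothetical trait names (the real methods are async and return `Option<String>`):

    /// Stand-in for `MmCoin`.
    trait MmCoinLike {
        /// Stand-in for `MmCoin::tx_history_db_id`.
        fn tx_history_db_id(&self) -> Option<String>;
    }

    /// Stand-in for `InitStandaloneCoinActivationOps`; the supertrait bound is the point.
    trait InitStandaloneLike: MmCoinLike {
        // activation-specific items elided
    }

    fn history_db_id_for<T: InitStandaloneLike>(coin: &T) -> Option<String> {
        // Allowed because every `InitStandaloneLike` type is also `MmCoinLike`.
        coin.tx_history_db_id()
    }
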
@@ -224,7 +224,7 @@ where coin.start_history_background_fetching( self.ctx.metrics.clone(), - TxHistoryStorageBuilder::new(&self.ctx, coin_clone.into().tx_history_db_id().await).build()?, + TxHistoryStorageBuilder::new(&self.ctx, coin_clone.tx_history_db_id().await).build()?, current_balances, ); } diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index 6f1f12028a..bc4737f434 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -260,11 +260,27 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { ticker: ticker.clone(), kind: TendermintInitErrorKind::Internal(e.to_string()), })?; + if let PrivKeyBuildPolicy::GlobalHDAccount(_) = private_key_policy { + let tendermint_private_key_policy = tendermint_priv_key_policy( + &conf, + &ticker, + private_key_policy.clone(), + activation_request.path_to_address, + )?; + + let result = TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy); + let pubkey = result.public_key().map_to_mm(|e| TendermintInitError { + ticker: ticker.clone(), + kind: TendermintInitErrorKind::Internal(e.to_string()), + })?; + run_db_migraiton_for_new_tendermint_pubkey(&ctx, pubkey, ticker.clone()).await?; - let tendermint_private_key_policy = - tendermint_priv_key_policy(&conf, &ticker, private_key_policy, activation_request.path_to_address)?; - - TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy) + result + } else { + let tendermint_private_key_policy = + tendermint_priv_key_policy(&conf, &ticker, private_key_policy, activation_request.path_to_address)?; + TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy) + } }; TendermintCoin::init( @@ -430,3 +446,12 @@ async fn run_db_migraiton_for_new_tendermint_pubkey( Ok(()) } + +#[cfg(target_arch = "wasm32")] +async fn run_db_migraiton_for_new_tendermint_pubkey( + _ctx: &MmArc, + _pubkey: TendermintPublicKey, + _ticker: String, +) -> MmResult<(), TendermintInitError> { + Ok(()) +} diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 956b835742..70c6ad2b98 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -306,7 +306,6 @@ impl MmCtx { #[cfg(not(target_arch = "wasm32"))] pub fn dbdir(&self, db_id: Option<&str>) -> PathBuf { let db_id = db_id.map(|t| t.to_owned()).unwrap_or_else(|| self.rmd160_hex()); - path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 4d0c76cc40..627444e3be 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -45,7 +45,6 @@ impl SqliteConnPool { /// Internal implementation to initialize a database connection. 
fn init_impl(ctx: &MmCtx, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { let db_id = Self::db_id_from_ctx(ctx, db_id, &kind); - let sqlite_file_path = match kind { DbIdConnKind::Shared => ctx.shared_dbdir(Some(&db_id)).join(SQLITE_SHARED_DB_ID), DbIdConnKind::Single => ctx.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID), @@ -124,10 +123,7 @@ impl SqliteConnPool { drop(connections); let mut connections = self.connections.write().unwrap(); - let sqlite_file_path = self.db_dir(&db_id).join(match kind { - DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, - DbIdConnKind::Single => SYNC_SQLITE_DB_ID, - }); + let sqlite_file_path = self.sqlite_file_path(&db_id, &kind); let connection = Self::open_connection(sqlite_file_path); connections.insert(db_id, Arc::clone(&connection)); @@ -166,11 +162,8 @@ impl SqliteConnPool { } drop(connections); - let sqlite_file_path = self.db_dir(&db_id).join(match kind { - DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, - DbIdConnKind::Single => SYNC_SQLITE_DB_ID, - }); let mut connections = self.connections.write().unwrap(); + let sqlite_file_path = self.sqlite_file_path(&db_id, &kind); let connection = Self::open_connection(sqlite_file_path); connections.insert(db_id, Arc::clone(&connection)); @@ -183,7 +176,6 @@ impl SqliteConnPool { } /// Opens a database connection based on the database ID and connection kind. - #[cfg(not(test))] fn open_connection(sqlite_file_path: PathBuf) -> Arc> { log_sqlite_file_open_attempt(&sqlite_file_path); Arc::new(Mutex::new( @@ -191,12 +183,6 @@ impl SqliteConnPool { )) } - /// Opens a database connection based on the database ID and connection kind. - #[cfg(test)] - fn open_connection(_sqlite_file_path: PathBuf) -> Arc> { - Arc::new(Mutex::new(Connection::open_in_memory().unwrap())) - } - fn db_dir(&self, db_id: &str) -> PathBuf { path_to_dbdir(self.db_root.as_deref(), db_id) } fn db_id(&self, db_id: Option<&str>, kind: &DbIdConnKind) -> String { match kind { @@ -216,6 +202,12 @@ impl SqliteConnPool { DbIdConnKind::Single => db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()), } } + fn sqlite_file_path(&self, db_id: &str, kind: &DbIdConnKind) -> PathBuf { + self.db_dir(&db_id).join(match kind { + DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, + DbIdConnKind::Single => SYNC_SQLITE_DB_ID, + }) + } } /// A pool for managing async SQLite connections, where each connection is keyed by a unique string identifier. diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index b7c159d008..980a73d22a 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -335,7 +335,6 @@ fn default_seednodes(netid: u16) -> Vec { #[cfg(not(target_arch = "wasm32"))] pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>, shared_db_id: Option<&str>) -> MmInitResult<()> { fix_shared_dbdir(ctx, shared_db_id)?; - let dbdir = ctx.dbdir(db_id); fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { path: dbdir.clone(), @@ -542,12 +541,6 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitResult<()> { info!("Version: {} DT {}", version, datetime); - #[cfg(not(target_arch = "wasm32"))] - fs::create_dir_all(ctx.dbdir(None)).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { - path: ctx.dbdir(None), - error: e.to_string(), - })?; - // This either initializes the cryptographic context or sets up the context for "no login mode". 
initialize_wallet_passphrase(&ctx).await?; From 8a01341552459cf13e30eef9b216bc1078099a9e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 26 Jun 2024 17:45:26 +0100 Subject: [PATCH 161/186] fix clippy --- .../coins_activation/src/tendermint_with_assets_activation.rs | 2 -- mm2src/mm2_core/src/mm_ctx.rs | 2 +- mm2src/mm2_core/src/sql_connection_pool.rs | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index bc4737f434..86ed3376ef 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -249,8 +249,6 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { kind: TendermintInitErrorKind::CantUseWatchersWithPubkeyPolicy, }); } - - #[cfg(not(target_arch = "wasm32"))] run_db_migraiton_for_new_tendermint_pubkey(&ctx, pubkey, ticker.clone()).await?; TendermintActivationPolicy::with_public_key(pubkey) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 70c6ad2b98..f613ca2085 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -327,7 +327,7 @@ impl MmCtx { pub fn is_watcher(&self) -> bool { self.conf["is_watcher"].as_bool().unwrap_or_default() } - pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(true) } + pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(false) } pub fn netid(&self) -> u16 { let netid = self.conf["netid"].as_u64().unwrap_or(0); diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 627444e3be..377457594c 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -203,7 +203,7 @@ impl SqliteConnPool { } } fn sqlite_file_path(&self, db_id: &str, kind: &DbIdConnKind) -> PathBuf { - self.db_dir(&db_id).join(match kind { + self.db_dir(db_id).join(match kind { DbIdConnKind::Shared => SQLITE_SHARED_DB_ID, DbIdConnKind::Single => SYNC_SQLITE_DB_ID, }) From b19d2ba222898a80a8777a690bd0944dc358f531 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 27 Jun 2024 12:50:44 +0100 Subject: [PATCH 162/186] minor improvements and fixes --- mm2src/mm2_core/src/mm_ctx.rs | 2 +- mm2src/mm2_core/src/sql_connection_pool.rs | 2 +- mm2src/mm2_main/src/database/stats_swaps.rs | 4 +- mm2src/mm2_main/src/lp_native_dex.rs | 1 + mm2src/mm2_main/src/lp_ordermatch.rs | 6 +- mm2src/mm2_main/src/lp_swap.rs | 20 +-- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 128 +++++++++++++++--- mm2src/mm2_main/src/lp_swap/pubkey_banning.rs | 1 - mm2src/mm2_main/src/lp_swap/saved_swap.rs | 2 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 11 +- mm2src/mm2_test_helpers/src/for_tests.rs | 4 +- 11 files changed, 138 insertions(+), 43 deletions(-) diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index f613ca2085..70c6ad2b98 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -327,7 +327,7 @@ impl MmCtx { pub fn is_watcher(&self) -> bool { self.conf["is_watcher"].as_bool().unwrap_or_default() } - pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(false) } + pub fn use_watchers(&self) -> bool { self.conf["use_watchers"].as_bool().unwrap_or(true) } pub fn netid(&self) -> u16 { let netid = self.conf["netid"].as_u64().unwrap_or(0); diff --git 
a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 377457594c..1a20c49ad0 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -14,7 +14,7 @@ pub const ASYNC_SQLITE_DB_ID: &str = "KOMODEFI.db"; const SYNC_SQLITE_DB_ID: &str = "MM2.db"; const SQLITE_SHARED_DB_ID: &str = "MM2-shared.db"; -/// Represents the kind of database connection ID: either shared or single-user. +/// Represents the kind of database connection ID: either shared or single-db. enum DbIdConnKind { Shared, Single, diff --git a/mm2src/mm2_main/src/database/stats_swaps.rs b/mm2src/mm2_main/src/database/stats_swaps.rs index e800a3cb0e..81c27c6988 100644 --- a/mm2src/mm2_main/src/database/stats_swaps.rs +++ b/mm2src/mm2_main/src/database/stats_swaps.rs @@ -48,8 +48,8 @@ const INSERT_STATS_SWAP: &str = "INSERT INTO stats_swaps ( taker_coin_usd_price, maker_pubkey, taker_pubkey -) VALUES (:maker_coin, :maker_coin_ticker, :maker_coin_platform, :taker_coin, :taker_coin_ticker, -:taker_coin_platform, :uuid, :started_at, :finished_at, :maker_amount, :taker_amount, :is_success, +) VALUES (:maker_coin, :maker_coin_ticker, :maker_coin_platform, :taker_coin, :taker_coin_ticker, +:taker_coin_platform, :uuid, :started_at, :finished_at, :maker_amount, :taker_amount, :is_success, :maker_coin_usd_price, :taker_coin_usd_price, :maker_pubkey, :taker_pubkey)"; pub const ADD_STARTED_AT_INDEX: &str = "CREATE INDEX timestamp_index ON stats_swaps (started_at);"; diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 980a73d22a..4fde5fcbe8 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -576,6 +576,7 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes } async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { + println!("kick_start: {db_id:?}"); let mut coins_needed_for_kick_start = swap_kick_starts(ctx.clone(), db_id) .await .map_to_mm(MmInitError::SwapsKickStartError)?; diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 58ae8c95ac..18ce367371 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -3052,7 +3052,8 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO lock_time, maker_order.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), secret, - ); + ) + .await; run_maker_swap(RunMakerSwapInput::StartNew(maker_swap), ctx).await; } }; @@ -3218,7 +3219,8 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat taker_order.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] fail_at, - ); + ) + .await; run_taker_swap(RunTakerSwapInput::StartNew(taker_swap), ctx).await } }; diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index f8091b4e92..45db7a9c71 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -337,8 +337,7 @@ pub async fn process_swap_msg(ctx: MmArc, topic: &str, msg: &[u8]) -> P2PRequest return match json::from_slice::(msg) { Ok(mut status) => { status.data.fetch_and_set_usd_prices().await; - let account_id = status.data.account_db_id().await; - if let Err(e) = save_stats_swap(&ctx, &status.data, account_id.as_deref()).await { + if let Err(e) = save_stats_swap(&ctx, &status.data).await { error!("Error saving the swap {} status: {}", 
status.data.uuid(), e); } Ok(()) @@ -1034,9 +1033,10 @@ fn add_swap_to_db_index(ctx: &MmArc, swap: &SavedSwap, db_id: Option<&str>) { } #[cfg(not(target_arch = "wasm32"))] -async fn save_stats_swap(ctx: &MmArc, swap: &SavedSwap, db_id: Option<&str>) -> Result<(), String> { - try_s!(swap.save_to_stats_db(ctx, db_id).await); - add_swap_to_db_index(ctx, swap, db_id); +async fn save_stats_swap(ctx: &MmArc, swap: &SavedSwap) -> Result<(), String> { + let db_id = swap.account_db_id().await; + try_s!(swap.save_to_stats_db(ctx, db_id.as_deref()).await); + add_swap_to_db_index(ctx, swap, db_id.as_deref()); Ok(()) } @@ -1226,7 +1226,7 @@ async fn broadcast_my_swap_status(ctx: &MmArc, uuid: Uuid, db_id: Option<&str>) status.hide_secrets(); #[cfg(not(target_arch = "wasm32"))] - try_s!(save_stats_swap(ctx, &status, db_id).await); + try_s!(save_stats_swap(ctx, &status).await); let status = SwapStatus { method: "swapstatus".into(), @@ -2438,7 +2438,7 @@ mod lp_swap_tests { taker_coin_nota: false, }; - let mut maker_swap = MakerSwap::new( + let mut maker_swap = block_on(MakerSwap::new( maker_ctx.clone(), taker_key_pair.public().compressed_unprefixed().unwrap().into(), maker_amount.clone(), @@ -2452,14 +2452,14 @@ mod lp_swap_tests { lock_duration, None, Default::default(), - ); + )); maker_swap.fail_at = maker_fail_at; #[cfg(any(test, feature = "run-docker-tests"))] let fail_at = std::env::var("TAKER_FAIL_AT").map(taker_swap::FailAt::from).ok(); - let taker_swap = TakerSwap::new( + let taker_swap = block_on(TakerSwap::new( taker_ctx.clone(), maker_key_pair.public().compressed_unprefixed().unwrap().into(), maker_amount.into(), @@ -2474,7 +2474,7 @@ mod lp_swap_tests { None, #[cfg(any(test, feature = "run-docker-tests"))] fail_at, - ); + )); block_on(futures::future::join( run_maker_swap(RunMakerSwapInput::StartNew(maker_swap), maker_ctx.clone()), diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 70bf3f7865..9d01012036 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -362,7 +362,7 @@ impl MakerSwap { } #[allow(clippy::too_many_arguments)] - pub fn new( + pub async fn new( ctx: MmArc, taker: bits256, maker_amount: BigDecimal, @@ -378,6 +378,7 @@ impl MakerSwap { secret: H256, ) -> Self { let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin); + let db_id = maker_coin.account_db_id().await; MakerSwap { maker_coin, taker_coin, @@ -395,7 +396,10 @@ impl MakerSwap { payment_locktime, p2p_privkey, mutable: RwLock::new(MakerSwapMut { - data: MakerSwapData::default(), + data: MakerSwapData { + db_id, + ..MakerSwapData::default() + }, other_maker_coin_htlc_pub: H264::default(), other_taker_coin_htlc_pub: H264::default(), taker_fee: None, @@ -1318,10 +1322,10 @@ impl MakerSwap { SavedSwap::Maker(swap) => swap, SavedSwap::Taker(_) => return ERR!("Can not load MakerSwap from SavedSwap::Taker uuid: {}", swap_uuid), }; - Self::load_from_saved(ctx, maker_coin, taker_coin, saved) + Self::load_from_saved(ctx, maker_coin, taker_coin, saved).await } - pub fn load_from_saved( + pub async fn load_from_saved( ctx: MmArc, maker_coin: MmCoinEnum, taker_coin: MmCoinEnum, @@ -1374,7 +1378,8 @@ impl MakerSwap { data.lock_duration, data.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), data.secret.into(), - ); + ) + .await; let command = saved.events.last().unwrap().get_command(); for saved_event in saved.events { swap.apply_event(saved_event.event); @@ -2440,7 +2445,13 @@ mod maker_swap_tests { 
.mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ready(Ok(None))))); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let actual = block_on(maker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::RefundedMyPayment, @@ -2475,7 +2486,13 @@ mod maker_swap_tests { .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ready(Ok(None))))); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let actual = block_on(maker_swap.recover_funds()).unwrap(); let expected = RecoveredSwap { action: RecoveredSwapAction::RefundedMyPayment, @@ -2504,7 +2521,13 @@ mod maker_swap_tests { }); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); assert!(block_on(maker_swap.recover_funds()).is_err()); } @@ -2536,7 +2559,13 @@ mod maker_swap_tests { }); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let err = block_on(maker_swap.recover_funds()).expect_err("Expected an error"); assert!(err.contains("Taker payment was already refunded")); assert!(unsafe { SEARCH_FOR_SWAP_TX_SPEND_MY_CALLED }); @@ -2566,7 +2595,13 @@ mod maker_swap_tests { .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ready(Ok(None))))); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let error = block_on(maker_swap.recover_funds()).unwrap_err(); assert!(error.contains("Too early to refund")); assert!(unsafe { MY_PAYMENT_SENT_CALLED }); @@ -2591,7 +2626,13 @@ mod maker_swap_tests { }); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); assert!(block_on(maker_swap.recover_funds()).is_err()); assert!(unsafe { MY_PAYMENT_SENT_CALLED }); } @@ -2608,7 +2649,13 @@ mod maker_swap_tests { TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); let maker_coin = 
MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); assert!(block_on(maker_swap.recover_funds()).is_err()); } @@ -2641,7 +2688,13 @@ mod maker_swap_tests { let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let err = block_on(maker_swap.recover_funds()).expect_err("Expected an error"); assert!(err.contains("Taker payment was already spent")); assert!(unsafe { SEARCH_FOR_SWAP_TX_SPEND_MY_CALLED }); @@ -2660,7 +2713,13 @@ mod maker_swap_tests { TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); assert!(block_on(maker_swap.recover_funds()).is_err()); } @@ -2699,7 +2758,13 @@ mod maker_swap_tests { let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let expected = Ok(RecoveredSwap { coin: "ticker".into(), action: RecoveredSwapAction::SpentOtherPayment, @@ -2743,7 +2808,13 @@ mod maker_swap_tests { let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let err = block_on(maker_swap.recover_funds()).unwrap_err(); assert!(err.contains("Taker payment spend transaction has been sent and confirmed")); assert!(unsafe { !SEARCH_FOR_SWAP_TX_SPEND_MY_CALLED }); @@ -2764,8 +2835,13 @@ mod maker_swap_tests { TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (_maker_swap, _) = - MakerSwap::load_from_saved(ctx.clone(), maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (_maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx.clone(), + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); let actual = get_locked_amount(&ctx, "ticker"); assert_eq!(actual, MmNumber::from(0)); @@ -2787,7 +2863,13 @@ mod maker_swap_tests { }); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + 
maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); assert_eq!(unsafe { SWAP_CONTRACT_ADDRESS_CALLED }, 2); assert_eq!( @@ -2816,7 +2898,13 @@ mod maker_swap_tests { }); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); - let (maker_swap, _) = MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, maker_saved_swap).unwrap(); + let (maker_swap, _) = block_on(MakerSwap::load_from_saved( + ctx, + maker_coin, + taker_coin, + maker_saved_swap, + )) + .unwrap(); assert_eq!(unsafe { SWAP_CONTRACT_ADDRESS_CALLED }, 1); let expected_addr = addr_from_str(ETH_SEPOLIA_SWAP_CONTRACT).unwrap(); diff --git a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs index 0c44c745ad..085e923358 100644 --- a/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs +++ b/mm2src/mm2_main/src/lp_swap/pubkey_banning.rs @@ -77,7 +77,6 @@ enum UnbanPubkeysReq { pub async fn unban_pubkeys_rpc(ctx: MmArc, req: Json) -> Result>, String> { let req: UnbanPubkeysReq = try_s!(json::from_value(req["unban_by"].clone())); - // TODO: db_id let ctx = try_s!(SwapsContext::from_ctx(&ctx)); let mut banned_pubs = try_s!(ctx.banned_pubkeys.lock()); let mut unbanned = HashMap::new(); diff --git a/mm2src/mm2_main/src/lp_swap/saved_swap.rs b/mm2src/mm2_main/src/lp_swap/saved_swap.rs index eed96a0fb4..a2608b3c5d 100644 --- a/mm2src/mm2_main/src/lp_swap/saved_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/saved_swap.rs @@ -110,7 +110,7 @@ impl SavedSwap { }; match self { SavedSwap::Maker(saved) => { - let (maker_swap, _) = try_s!(MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, saved)); + let (maker_swap, _) = try_s!(MakerSwap::load_from_saved(ctx, maker_coin, taker_coin, saved).await); Ok(try_s!(maker_swap.recover_funds().await)) }, SavedSwap::Taker(saved) => { diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index d1da7a686d..8600dc5c3f 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -898,7 +898,7 @@ impl TakerSwap { } #[allow(clippy::too_many_arguments)] - pub fn new( + pub async fn new( ctx: MmArc, maker: bits256, maker_amount: MmNumber, @@ -913,6 +913,7 @@ impl TakerSwap { p2p_privkey: Option, #[cfg(any(test, feature = "run-docker-tests"))] fail_at: Option, ) -> Self { + let db_id = taker_coin.account_db_id().await; TakerSwap { maker_coin, taker_coin, @@ -930,7 +931,10 @@ impl TakerSwap { payment_locktime, p2p_privkey, mutable: RwLock::new(TakerSwapMut { - data: TakerSwapData::default(), + data: TakerSwapData { + db_id, + ..TakerSwapData::default() + }, other_maker_coin_htlc_pub: H264::default(), other_taker_coin_htlc_pub: H264::default(), taker_fee: None, @@ -2055,7 +2059,8 @@ impl TakerSwap { data.p2p_privkey.map(SerializableSecp256k1Keypair::into_inner), #[cfg(any(test, feature = "run-docker-tests"))] fail_at, - ); + ) + .await; for saved_event in &saved.events { swap.apply_event(saved_event.event.clone()); diff --git a/mm2src/mm2_test_helpers/src/for_tests.rs b/mm2src/mm2_test_helpers/src/for_tests.rs index 5e2e720410..f33226ee1b 100644 --- a/mm2src/mm2_test_helpers/src/for_tests.rs +++ b/mm2src/mm2_test_helpers/src/for_tests.rs @@ -3014,8 +3014,8 @@ pub async fn init_utxo_electrum( "rpc": "Electrum", "rpc_data": { "servers": servers, - "path_to_address": path_to_address, - } + }, + "path_to_address": path_to_address } }); if let Some(priv_key_policy) = priv_key_policy { From f35f37154007c4f914fa9f80474b2f8df5588955 
Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 28 Jun 2024 00:49:32 +0100 Subject: [PATCH 163/186] fix review notes --- mm2src/mm2_main/src/lp_native_dex.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 4fde5fcbe8..a935c160bf 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -492,7 +492,7 @@ async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, shared_db_id: O .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; SqliteConnPool::init(ctx, db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - SqliteConnPool::init_shared(ctx, db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + SqliteConnPool::init_shared(ctx, shared_db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; init_and_migrate_sql_db(ctx, db_id).await?; migrate_db(ctx, db_id)?; From 9a1f89d82c6421b3b239391c6f482819ba184145 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 28 Jun 2024 01:14:15 +0100 Subject: [PATCH 164/186] cargo fmt --- mm2src/coins/eth.rs | 475 +++++++++--------- .../storage/blockdb/blockdb_sql_storage.rs | 30 +- 2 files changed, 250 insertions(+), 255 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index c1d0f5cd7c..d3779d17be 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -82,8 +82,7 @@ use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; use mm2_net::transport::{GuiAuthValidation, GuiAuthValidationGenerator}; use mm2_number::bigdecimal_custom::CheckedDivision; use mm2_number::{BigDecimal, BigUint, MmNumber}; -#[cfg(test)] -use mocktopus::macros::*; +#[cfg(test)] use mocktopus::macros::*; use rand::seq::SliceRandom; use rlp::{DecoderError, Encodable, RlpStream}; use rpc::v1::types::Bytes as BytesJson; @@ -139,17 +138,13 @@ mod eip1559_gas_fee; mod eth_balance_events; pub mod eth_hd_wallet; mod eth_rpc; -#[cfg(test)] -mod eth_tests; -#[cfg(target_arch = "wasm32")] -mod eth_wasm_tests; +#[cfg(test)] mod eth_tests; +#[cfg(target_arch = "wasm32")] mod eth_wasm_tests; mod eth_withdraw; -#[cfg(any(test, target_arch = "wasm32"))] -mod for_tests; +#[cfg(any(test, target_arch = "wasm32"))] mod for_tests; pub(crate) mod nft_swap_v2; mod nonce; -#[path = "eth/v2_activation.rs"] -pub mod v2_activation; +#[path = "eth/v2_activation.rs"] pub mod v2_activation; mod web3_transport; pub(crate) use eip1559_gas_fee::FeePerGasEstimated; @@ -265,7 +260,7 @@ lazy_static! { } pub type EthDerivationMethod = DerivationMethod; -pub type Web3RpcFut = Box> + Send>; +pub type Web3RpcFut = Box> + Send>; pub type Web3RpcResult = Result>; type EthPrivKeyPolicy = PrivKeyPolicy; @@ -313,9 +308,9 @@ impl PayForGasOption { fn get_fee_per_gas(&self) -> (Option, Option) { match self { PayForGasOption::Eip1559(Eip1559FeePerGas { - max_fee_per_gas, - max_priority_fee_per_gas, - }) => (Some(*max_fee_per_gas), Some(*max_priority_fee_per_gas)), + max_fee_per_gas, + max_priority_fee_per_gas, + }) => (Some(*max_fee_per_gas), Some(*max_priority_fee_per_gas)), PayForGasOption::Legacy(..) 
=> (None, None), } } @@ -364,10 +359,10 @@ impl From for Web3RpcError { match e { web3::Error::InvalidResponse(_) | web3::Error::Decoder(_) | web3::Error::Rpc(_) => { Web3RpcError::InvalidResponse(error_str) - } + }, web3::Error::Unreachable | web3::Error::Transport(_) | web3::Error::Io(_) => { Web3RpcError::Transport(error_str) - } + }, _ => Web3RpcError::Internal(error_str), } } @@ -383,7 +378,7 @@ impl From for RawTransactionError { | Web3RpcError::InvalidGasApiConfig(internal) => RawTransactionError::InternalError(internal), Web3RpcError::NftProtocolNotSupported => { RawTransactionError::InternalError("Nft Protocol is not supported yet!".to_string()) - } + }, } } } @@ -490,7 +485,7 @@ impl From for BalanceError { | Web3RpcError::InvalidGasApiConfig(internal) => BalanceError::Internal(internal), Web3RpcError::NftProtocolNotSupported => { BalanceError::Internal("Nft Protocol is not supported yet!".to_string()) - } + }, } } } @@ -557,7 +552,7 @@ impl EthPrivKeyBuildPolicy { // Use an internal private key as the coin secret. let priv_key = crypto_ctx.mm2_internal_privkey_secret(); Ok(EthPrivKeyBuildPolicy::IguanaPrivKey(priv_key)) - } + }, KeyPairPolicy::GlobalHDAccount(global_hd) => Ok(EthPrivKeyBuildPolicy::GlobalHDAccount(global_hd.clone())), } } @@ -895,12 +890,12 @@ pub async fn withdraw_erc1155(ctx: MmArc, withdraw_type: WithdrawErc1155) -> Wit Token::Bytes("0x".into()), ])?; (0.into(), data, token_addr, eth_coin.ticker()) - } + }, EthCoinType::Erc20 { .. } => { return MmError::err(WithdrawError::InternalError( "Erc20 coin type doesnt support withdraw nft".to_owned(), )); - } + }, EthCoinType::Nft { .. } => return MmError::err(WithdrawError::NftProtocolNotSupported), }; let (gas, pay_for_gas_option) = get_eth_gas_details_from_withdraw_fee( @@ -912,7 +907,7 @@ pub async fn withdraw_erc1155(ctx: MmArc, withdraw_type: WithdrawErc1155) -> Wit call_addr, false, ) - .await?; + .await?; let address_lock = eth_coin.get_address_lock(my_address.to_string()).await; let _nonce_lock = address_lock.lock().await; let (nonce, _) = eth_coin @@ -984,12 +979,12 @@ pub async fn withdraw_erc721(ctx: MmArc, withdraw_type: WithdrawErc721) -> Withd Token::Uint(token_id_u256), ])?; (0.into(), data, token_addr, eth_coin.ticker()) - } + }, EthCoinType::Erc20 { .. } => { return MmError::err(WithdrawError::InternalError( "Erc20 coin type doesnt support withdraw nft".to_owned(), )); - } + }, // TODO: start to use NFT GLOBAL TOKEN for withdraw EthCoinType::Nft { .. 
} => return MmError::err(WithdrawError::NftProtocolNotSupported), }; @@ -1002,7 +997,7 @@ pub async fn withdraw_erc721(ctx: MmArc, withdraw_type: WithdrawErc721) -> Withd call_addr, false, ) - .await?; + .await?; let address_lock = eth_coin.get_address_lock(my_address.to_string()).await; let _nonce_lock = address_lock.lock().await; @@ -1064,7 +1059,7 @@ impl SwapOps for EthCoin { address, try_tx_fus!(wei_from_big_decimal(&dex_fee.fee_amount().into(), self.decimals)), ) - .map(TransactionEnum::from), + .map(TransactionEnum::from), ) } @@ -1140,7 +1135,7 @@ impl SwapOps for EthCoin { fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error=String> + Send> { + ) -> Box, Error = String> + Send> { let id = self.etomic_swap_id( try_fus!(if_my_payment_sent_args.time_lock.try_into()), if_my_payment_sent_args.secret_hash, @@ -1190,13 +1185,13 @@ impl SwapOps for EthCoin { Some(t) => break Ok(Some(try_s!(signed_tx_from_web3_tx(t)).into())), None => break Ok(None), } - } + }, None => { if to_block >= current_block { break Ok(None); } from_block = to_block; - } + }, } } }; @@ -1215,7 +1210,7 @@ impl SwapOps for EthCoin { input.search_from_block, input.watcher_reward, ) - .await + .await } async fn search_for_swap_tx_spend_other( @@ -1230,7 +1225,7 @@ impl SwapOps for EthCoin { input.search_from_block, input.watcher_reward, ) - .await + .await } fn check_tx_signed_by_pub(&self, _tx: &[u8], _expected_pub: &[u8]) -> Result> { @@ -1299,7 +1294,7 @@ impl SwapOps for EthCoin { return Ok(self.fallback_swap_contract.map(|addr| addr.0.to_vec().into())); } MmError::err(NegotiateSwapContractAddrErr::UnexpectedOtherAddr(bytes.into())) - } + }, None => self .fallback_swap_contract .map(|addr| Some(addr.0.to_vec().into())) @@ -1379,7 +1374,7 @@ impl SwapOps for EthCoin { &String::from_utf8(instructions.to_vec()) .map_err(|err| ValidateInstructionsErr::DeserializationErr(err.to_string()))?, ) - .map_err(|err| ValidateInstructionsErr::DeserializationErr(err.to_string()))?; + .map_err(|err| ValidateInstructionsErr::DeserializationErr(err.to_string()))?; // TODO: Reward can be validated here Ok(PaymentInstructions::WatcherReward(watcher_reward)) @@ -1516,12 +1511,12 @@ impl WatcherOps for EthCoin { contract_address, expected_swap_contract_address, ))); } - } + }, Create => { return MmError::err(ValidatePaymentError::WrongPaymentTx( "Tx action must be Call, found Create instead".to_string(), )); - } + }, }; let actual_status = selfi @@ -1570,7 +1565,7 @@ impl WatcherOps for EthCoin { ValidatePaymentError::WrongPaymentTx("Invalid type for secret hash argument".to_string()) })?; dhash160(&secret_input).to_vec() - } + }, WatcherSpendType::TakerPaymentRefund => get_function_input_data(&decoded, function, 2) .map_to_mm(ValidatePaymentError::TxDeserializationError)? .into_fixed_bytes() @@ -1665,7 +1660,7 @@ impl WatcherOps for EthCoin { } else { trade_amount } - } + }, WatcherSpendType::TakerPaymentRefund => trade_amount + expected_reward_amount, }; if amount_input != Token::Uint(total_amount) { @@ -1685,7 +1680,7 @@ impl WatcherOps for EthCoin { Token::Address(Address::default()), ))); } - } + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -1709,7 +1704,7 @@ impl WatcherOps for EthCoin { Token::Address(*token_addr), ))); } - } + }, EthCoinType::Nft { .. 
} => return MmError::err(ValidatePaymentError::NftProtocolNotSupported), } @@ -1858,7 +1853,7 @@ impl WatcherOps for EthCoin { validate_watcher_reward(expected_reward_amount.as_u64(), reward_amount_input.as_u64(), false)?; // TODO: Validate the value - } + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -1950,7 +1945,7 @@ impl WatcherOps for EthCoin { tx_from_rpc.value, reward_amount_input ))); } - } + }, EthCoinType::Nft { .. } => return MmError::err(ValidatePaymentError::NftProtocolNotSupported), } @@ -1977,7 +1972,7 @@ impl WatcherOps for EthCoin { input.search_from_block, true, ) - .await + .await } async fn get_taker_watcher_reward( @@ -2043,14 +2038,14 @@ impl WatcherOps for EthCoin { )) })? } - } + }, EthCoinType::Nft { .. } => { return MmError::err(WatcherRewardError::InternalError( "Nft Protocol is not supported yet!".to_string(), )); - } + }, } - } + }, }; let send_contract_reward_on_spend = other_coin.is_eth(); @@ -2088,7 +2083,7 @@ impl MarketCoinOps for EthCoin { } => { let uncompressed_without_prefix = hex::encode(key_pair.public()); Ok(format!("04{}", uncompressed_without_prefix)) - } + }, EthPrivKeyPolicy::Trezor => { let public_key = self .deref() @@ -2101,11 +2096,11 @@ impl MarketCoinOps for EthCoin { .pubkey(); let uncompressed_without_prefix = hex::encode(public_key); Ok(format!("04{}", uncompressed_without_prefix)) - } + }, #[cfg(target_arch = "wasm32")] EthPrivKeyPolicy::Metamask(ref metamask_policy) => { Ok(format!("{:02x}", metamask_policy.public_key_uncompressed)) - } + }, } } @@ -2168,7 +2163,7 @@ impl MarketCoinOps for EthCoin { } } - fn send_raw_tx(&self, mut tx: &str) -> Box + Send> { + fn send_raw_tx(&self, mut tx: &str) -> Box + Send> { if tx.starts_with("0x") { tx = &tx[2..]; } @@ -2186,7 +2181,7 @@ impl MarketCoinOps for EthCoin { Box::new(fut.boxed().compat()) } - fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send> { + fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send> { let coin = self.clone(); let tx = tx.to_owned(); @@ -2208,7 +2203,7 @@ impl MarketCoinOps for EthCoin { } } - fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send> { + fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send> { macro_rules! update_status_with_error { ($status: ident, $error: ident) => { match $error.get_inner() { @@ -2242,7 +2237,7 @@ impl MarketCoinOps for EthCoin { Err(e) => { update_status_with_error!(status, e); return Err(e.to_string()); - } + }, }; // checking that confirmed_at is greater than zero to prevent overflow. @@ -2281,11 +2276,11 @@ impl MarketCoinOps for EthCoin { status.append(" Confirmed."); break Ok(()); } - } + }, Err(e) => { update_status_with_error!(status, e); return Err(e.to_string()); - } + }, } Timer::sleep(check_every).await; @@ -2307,7 +2302,7 @@ impl MarketCoinOps for EthCoin { return Box::new(futures01::future::err(TransactionErr::Plain(ERRL!( "Invalid payment action: the payment action cannot be create" )))); - } + }, }, }; @@ -2318,7 +2313,7 @@ impl MarketCoinOps for EthCoin { return Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" 
)))); - } + }, }; let payment_func = try_tx_fus!(SWAP_CONTRACT.function(&func_name)); @@ -2330,7 +2325,7 @@ impl MarketCoinOps for EthCoin { "Expected Token::FixedBytes, got {:?}", invalid_token )))); - } + }, }; let selfi = self.clone(); let from_block = args.from_block; @@ -2352,7 +2347,7 @@ impl MarketCoinOps for EthCoin { error!("Error getting block number: {}", e); Timer::sleep(5.).await; continue; - } + }, }; let events = match selfi @@ -2365,7 +2360,7 @@ impl MarketCoinOps for EthCoin { error!("Error getting spend events: {}", e); Timer::sleep(5.).await; continue; - } + }, }; let found = events.iter().find(|event| &event.data.0[..32] == id.as_slice()); @@ -2378,12 +2373,12 @@ impl MarketCoinOps for EthCoin { info!("Tx {} not found yet", tx_hash); Timer::sleep(check_every).await; continue; - } + }, Err(e) => { error!("Get tx {} error: {}", tx_hash, e); Timer::sleep(check_every).await; continue; - } + }, }; return Ok(TransactionEnum::from(try_tx_s!(signed_tx_from_web3_tx(transaction)))); @@ -2402,7 +2397,7 @@ impl MarketCoinOps for EthCoin { .map_to_mm(TxMarshalingErr::InvalidInput) } - fn current_block(&self) -> Box + Send> { + fn current_block(&self) -> Box + Send> { let coin = self.clone(); let fut = async move { @@ -2455,7 +2450,7 @@ lazy_static! { static ref NONCE_LOCK: AddressNonceLocks = Mutex::new(HashMap::new()); } -type EthTxFut = Box + Send + 'static>; +type EthTxFut = Box + Send + 'static>; /// Signs an Eth transaction using `key_pair`. /// @@ -2622,12 +2617,12 @@ async fn sign_raw_eth_tx(coin: &EthCoin, args: &SignEthTransactionParams) -> Raw &pay_for_gas_option, my_address, ) - .await - .map(|(signed_tx, _)| RawTransactionRes { - tx_hex: signed_tx.tx_hex().into(), - }) - .map_to_mm(|err| RawTransactionError::TransactionError(err.get_plain_text_format())) - } + .await + .map(|(signed_tx, _)| RawTransactionRes { + tx_hex: signed_tx.tx_hex().into(), + }) + .map_to_mm(|err| RawTransactionError::TransactionError(err.get_plain_text_format())) + }, #[cfg(target_arch = "wasm32")] EthPrivKeyPolicy::Metamask(_) => MmError::err(RawTransactionError::InvalidParam( "sign raw eth tx not implemented for Metamask".into(), @@ -2669,14 +2664,14 @@ impl RpcCommonOps for EthCoin { // Bring the live client to the front of rpc_clients clients.rotate_left(i); return Ok(client); - } + }, Ok(Err(rpc_error)) => { debug!("Could not get client version on: {:?}. Error: {}", &client, rpc_error); if let Web3Transport::Websocket(socket_transport) = client.web3.transport() { socket_transport.stop_connection_loop().await; }; - } + }, Err(timeout_error) => { debug!( "Client version timeout exceed on: {:?}. 
Error: {}", @@ -2686,7 +2681,7 @@ impl RpcCommonOps for EthCoin { if let Web3Transport::Websocket(socket_transport) = client.web3.transport() { socket_transport.stop_connection_loop().await; }; - } + }, }; } @@ -2707,7 +2702,7 @@ impl EthCoin { swap_contract_address: Address, from_block: u64, to_block: u64, - ) -> Box, Error=String> + Send> { + ) -> Box, Error = String> + Send> { let contract_event = try_fus!(SWAP_CONTRACT.event("SenderRefunded")); let filter = FilterBuilder::default() .topics(Some(vec![contract_event.signature()]), None, None, None) @@ -2793,7 +2788,7 @@ impl EthCoin { &ERRL!("Error on getting my address: {}", e), ); return; - } + }, }; let mut success_iteration = 0i32; loop { @@ -2819,7 +2814,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let mut saved_traces = match self.load_saved_traces(ctx, my_address, self.account_db_id().await.as_deref()) @@ -2847,7 +2842,7 @@ impl EthCoin { &ERRL!("Error {} on 'load_history_from_file', stop the history loop", e), ); return; - } + }, }; // AP: AFAIK ETH RPC doesn't support conditional filters like `get this OR this` so we have @@ -2879,7 +2874,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let to_traces_before_earliest = match self @@ -2901,7 +2896,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let total_length = from_traces_before_earliest.len() + to_traces_before_earliest.len(); @@ -2939,7 +2934,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let to_traces_after_latest = match self @@ -2961,7 +2956,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let total_length = from_traces_after_latest.len() + to_traces_after_latest.len(); @@ -3007,7 +3002,7 @@ impl EthCoin { ), ); continue; - } + }, }; let web3_tx = match web3_tx { Some(t) => t, @@ -3018,7 +3013,7 @@ impl EthCoin { &ERRL!("No such transaction {:?}", trace.transaction_hash.unwrap()), ); continue; - } + }, }; mm_counter!(ctx.metrics, "tx.history.response.count", 1, "coin" => self.ticker.clone(), "method" => "tx_detail_by_hash"); @@ -3036,7 +3031,7 @@ impl EthCoin { ), ); continue; - } + }, }; let fee_coin = match &self.coin_type { EthCoinType::Eth => self.ticker(), @@ -3048,7 +3043,7 @@ impl EthCoin { &ERRL!("Error on getting fee coin: Nft Protocol is not supported yet!"), ); continue; - } + }, }; let fee_details: Option = match receipt { Some(r) => { @@ -3063,9 +3058,9 @@ impl EthCoin { PayForGasOption::Legacy(LegacyGasPrice { gas_price }), fee_coin, ) - .unwrap(), + .unwrap(), ) - } + }, None => None, }; @@ -3103,7 +3098,7 @@ impl EthCoin { &ERRL!("Error {} on getting block {} data", e, trace.block_number), ); continue; - } + }, }; let details = TransactionDetails { @@ -3171,7 +3166,7 @@ impl EthCoin { &ERRL!("Error on getting my address: {}", e), ); return; - } + }, }; let mut success_iteration = 0i32; loop { @@ -3197,7 +3192,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let mut saved_events = @@ -3243,7 +3238,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let to_events_before_earliest = match self @@ -3266,7 +3261,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let total_length = from_events_before_earliest.len() + to_events_before_earliest.len(); @@ -3304,7 +3299,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let to_events_after_latest = match self @@ -3327,7 +3322,7 @@ impl EthCoin { ); Timer::sleep(10.).await; continue; - } + }, }; let total_length = 
from_events_after_latest.len() + to_events_after_latest.len(); @@ -3362,7 +3357,7 @@ impl EthCoin { &ERRL!("Error {} on 'load_history_from_file', stop the history loop", e), ); return; - } + }, }; let internal_id = BytesJson::from(sha256(&json::to_vec(&event).unwrap()).to_vec()); if existing_history.iter().any(|item| item.internal_id == internal_id) { @@ -3405,7 +3400,7 @@ impl EthCoin { ), ); continue; - } + }, }; mm_counter!(ctx.metrics, "tx.history.response.count", 1, @@ -3420,7 +3415,7 @@ impl EthCoin { &ERRL!("No such transaction {:?}", event.transaction_hash.unwrap()), ); continue; - } + }, }; let receipt = match self.transaction_receipt(event.transaction_hash.unwrap()).await { @@ -3436,7 +3431,7 @@ impl EthCoin { ), ); continue; - } + }, }; let fee_coin = match &self.coin_type { EthCoinType::Eth => self.ticker(), @@ -3448,7 +3443,7 @@ impl EthCoin { &ERRL!("Error on getting fee coin: Nft Protocol is not supported yet!"), ); continue; - } + }, }; let fee_details = match receipt { Some(r) => { @@ -3463,9 +3458,9 @@ impl EthCoin { PayForGasOption::Legacy(LegacyGasPrice { gas_price }), fee_coin, ) - .unwrap(), + .unwrap(), ) - } + }, None => None, }; let block_number = event.block_number.unwrap(); @@ -3478,7 +3473,7 @@ impl EthCoin { &ERRL!("Block {} is None", block_number), ); continue; - } + }, Err(e) => { ctx.log.log( "", @@ -3486,7 +3481,7 @@ impl EthCoin { &ERRL!("Error {} on getting block {} data", e, block_number), ); continue; - } + }, }; let raw = signed_tx_from_web3_tx(web3_tx).unwrap(); @@ -3586,12 +3581,12 @@ impl EthCoin { .await .map_err(|e| TransactionErr::Plain(ERRL!("{}", e)))?; sign_and_send_transaction_with_keypair(&coin, key_pair, address, value, action, data, gas).await - } + }, EthPrivKeyPolicy::Trezor => Err(TransactionErr::Plain(ERRL!("Trezor is not supported for swaps yet!"))), #[cfg(target_arch = "wasm32")] EthPrivKeyPolicy::Metamask(_) => { sign_and_send_transaction_with_metamask(coin, value, action, data, gas).await - } + }, } }; Box::new(fut.boxed().compat()) @@ -3618,7 +3613,7 @@ impl EthCoin { data, U256::from(gas_limit::ETH_SEND_ERC20), ) - } + }, EthCoinType::Nft { .. } => Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" 
)))), @@ -3661,7 +3656,7 @@ impl EthCoin { Token::Bool(reward.send_contract_reward_on_spend), Token::Uint(reward_amount) ])) - } + }, None => try_tx_fus!(function.encode_input(&[ Token::FixedBytes(id), Token::Address(receiver_addr), @@ -3671,7 +3666,7 @@ impl EthCoin { }; let gas = U256::from(gas_limit::ETH_PAYMENT); self.sign_and_send_transaction(value, Action::Call(swap_contract_address), data, gas) - } + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -3695,13 +3690,13 @@ impl EthCoin { let eth_reward_amount = try_tx_fus!(wei_from_big_decimal(&reward.amount, ETH_DECIMALS)); value += eth_reward_amount; eth_reward_amount - } + }, RewardTarget::PaymentSpender => { let token_reward_amount = try_tx_fus!(wei_from_big_decimal(&reward.amount, self.decimals)); amount += token_reward_amount; token_reward_amount - } + }, _ => { // TODO tests passed without this change, need to research on how it worked if reward.send_contract_reward_on_spend { @@ -3712,7 +3707,7 @@ impl EthCoin { } else { 0.into() } - } + }, }; try_tx_fus!(function.encode_input(&[ @@ -3726,7 +3721,7 @@ impl EthCoin { Token::Bool(reward.send_contract_reward_on_spend), Token::Uint(reward_amount), ])) - } + }, None => { try_tx_fus!(function.encode_input(&[ Token::FixedBytes(id), @@ -3736,7 +3731,7 @@ impl EthCoin { Token::FixedBytes(secret_hash), Token::Uint(time_lock) ])) - } + }, }; let wait_for_required_allowance_until = args.wait_for_confirmation_until; @@ -3781,7 +3776,7 @@ impl EthCoin { )) } })) - } + }, EthCoinType::Nft { .. } => Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" )))), @@ -3803,7 +3798,7 @@ impl EthCoin { return Box::new(futures01::future::err(TransactionErr::Plain(ERRL!( "Invalid payment action: the payment action cannot be create" )))); - } + }, }; let watcher_reward = input.watcher_reward; @@ -3852,7 +3847,7 @@ impl EthCoin { ) }), ) - } + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -3900,7 +3895,7 @@ impl EthCoin { ) }), ) - } + }, EthCoinType::Nft { .. } => Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" )))), @@ -3922,7 +3917,7 @@ impl EthCoin { return Box::new(futures01::future::err(TransactionErr::Plain(ERRL!( "Invalid payment action: the payment action cannot be create" )))); - } + }, }; match self.coin_type { @@ -3972,7 +3967,7 @@ impl EthCoin { ) }), ) - } + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -4023,7 +4018,7 @@ impl EthCoin { ) }), ) - } + }, EthCoinType::Nft { .. } => Box::new(futures01::future::err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" )))), @@ -4092,9 +4087,9 @@ impl EthCoin { data, U256::from(gas_limit::ETH_RECEIVER_SPEND), ) - .compat() - .await - } + .compat() + .await + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -4144,9 +4139,9 @@ impl EthCoin { data, U256::from(gas_limit::ERC20_RECEIVER_SPEND), ) - .compat() - .await - } + .compat() + .await + }, EthCoinType::Nft { .. } => Err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported!" ))), @@ -4215,9 +4210,9 @@ impl EthCoin { data, U256::from(gas_limit::ETH_SENDER_REFUND), ) - .compat() - .await - } + .compat() + .await + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -4267,9 +4262,9 @@ impl EthCoin { data, U256::from(gas_limit::ERC20_SENDER_REFUND), ) - .compat() - .await - } + .compat() + .await + }, EthCoinType::Nft { .. } => Err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" 
))), @@ -4292,12 +4287,12 @@ impl EthCoin { _ => { let error = format!("Expected U256 as balanceOf result but got {:?}", decoded); MmError::err(BalanceError::InvalidResponse(error)) - } + }, } - } + }, EthCoinType::Nft { .. } => { MmError::err(BalanceError::Internal("Nft Protocol is not supported yet!".to_string())) - } + }, } }; Box::new(fut.boxed().compat()) @@ -4358,7 +4353,7 @@ impl EthCoin { _ => { let error = format!("Expected U256 as balanceOf result but got {:?}", decoded); MmError::err(BalanceError::InvalidResponse(error)) - } + }, } } @@ -4384,14 +4379,14 @@ impl EthCoin { _ => { let error = format!("Expected U256 as balanceOf result but got {:?}", decoded); return MmError::err(BalanceError::InvalidResponse(error)); - } + }, } - } + }, EthCoinType::Erc20 { .. } => { return MmError::err(BalanceError::Internal( "Erc20 coin type doesnt support Erc1155 standard".to_owned(), )); - } + }, }; let wallet_amount = u256_to_big_decimal(wallet_amount_uint, self.decimals)?; Ok(wallet_amount) @@ -4414,19 +4409,19 @@ impl EthCoin { _ => { let error = format!("Expected Address as ownerOf result but got {:?}", decoded); return MmError::err(GetNftInfoError::InvalidResponse(error)); - } + }, } - } + }, EthCoinType::Erc20 { .. } => { return MmError::err(GetNftInfoError::Internal( "Erc20 coin type doesnt support Erc721 standard".to_owned(), )); - } + }, }; Ok(owner_address) } - fn estimate_gas_wrapper(&self, req: CallRequest) -> Box + Send> { + fn estimate_gas_wrapper(&self, req: CallRequest) -> Box + Send> { let coin = self.clone(); // always using None block number as old Geth version accept only single argument in this RPC @@ -4520,9 +4515,9 @@ impl EthCoin { _ => { let error = format!("Expected U256 as allowance result but got {:?}", decoded); MmError::err(Web3RpcError::InvalidResponse(error)) - } + }, } - } + }, EthCoinType::Nft { .. } => MmError::err(Web3RpcError::NftProtocolNotSupported), } }; @@ -4573,7 +4568,7 @@ impl EthCoin { return Err(TransactionErr::ProtocolNotSupported(ERRL!( "Nft Protocol is not supported yet!" ))); - } + }, }; let function = try_tx_s!(ERC20_CONTRACT.function("approve")); let data = try_tx_s!(function.encode_input(&[Token::Address(spender), Token::Uint(amount)])); @@ -4596,7 +4591,7 @@ impl EthCoin { swap_contract_address: Address, from_block: u64, to_block: u64, - ) -> Box, Error=String> + Send> { + ) -> Box, Error = String> + Send> { let contract_event = try_fus!(SWAP_CONTRACT.event("PaymentSent")); let filter = FilterBuilder::default() .topics(Some(vec![contract_event.signature()]), None, None, None) @@ -4617,7 +4612,7 @@ impl EthCoin { swap_contract_address: Address, from_block: u64, to_block: u64, - ) -> Box, Error=String> + Send> { + ) -> Box, Error = String> + Send> { let contract_event = try_fus!(SWAP_CONTRACT.event("ReceiverSpent")); let filter = FilterBuilder::default() .topics(Some(vec![contract_event.signature()]), None, None, None) @@ -4764,10 +4759,10 @@ impl EthCoin { if watcher_reward.send_contract_reward_on_spend { expected_value += actual_reward_amount } - } + }, RewardTarget::PaymentSender | RewardTarget::PaymentSpender | RewardTarget::Contract => { expected_value += actual_reward_amount - } + }, }; } @@ -4777,7 +4772,7 @@ impl EthCoin { tx_from_rpc.value, trade_amount ))); } - } + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -4855,10 +4850,10 @@ impl EthCoin { let expected_reward_amount = match watcher_reward.reward_target { RewardTarget::Contract | RewardTarget::PaymentSender => { wei_from_big_decimal(&watcher_reward.amount, ETH_DECIMALS)? 
- } + }, RewardTarget::PaymentSpender => { wei_from_big_decimal(&watcher_reward.amount, selfi.decimals)? - } + }, _ => { // TODO tests passed without this change, need to research on how it worked if watcher_reward.send_contract_reward_on_spend { @@ -4866,7 +4861,7 @@ impl EthCoin { } else { 0.into() } - } + }, }; let actual_reward_amount = get_function_input_data(&decoded, function, 8) @@ -4887,13 +4882,13 @@ impl EthCoin { match watcher_reward.reward_target { RewardTarget::PaymentSender | RewardTarget::Contract => { expected_value += actual_reward_amount - } + }, RewardTarget::PaymentSpender => expected_amount += actual_reward_amount, _ => { if watcher_reward.send_contract_reward_on_spend { expected_value += actual_reward_amount } - } + }, }; if decoded[1] != Token::Uint(expected_amount) { @@ -4910,7 +4905,7 @@ impl EthCoin { tx_from_rpc.value, expected_value ))); } - } + }, EthCoinType::Nft { .. } => return MmError::err(ValidatePaymentError::NftProtocolNotSupported), } @@ -4923,7 +4918,7 @@ impl EthCoin { &self, swap_contract_address: H160, token: Token, - ) -> Box + Send + 'static> { + ) -> Box + Send + 'static> { let function = try_fus!(SWAP_CONTRACT.function("payments")); let data = try_fus!(function.encode_input(&[token])); @@ -5000,13 +4995,13 @@ impl EthCoin { Some(t) => t, None => { return ERR!("Found ReceiverSpent event, but transaction {:02x} is missing", tx_hash); - } + }, }; return Ok(Some(FoundSwapTxSpend::Spent(TransactionEnum::from(try_s!( signed_tx_from_web3_tx(transaction) ))))); - } + }, None => return ERR!("Found ReceiverSpent event, but it doesn't have tx_hash"), } } @@ -5025,13 +5020,13 @@ impl EthCoin { Some(t) => t, None => { return ERR!("Found SenderRefunded event, but transaction {:02x} is missing", tx_hash); - } + }, }; return Ok(Some(FoundSwapTxSpend::Refunded(TransactionEnum::from(try_s!( signed_tx_from_web3_tx(transaction) ))))); - } + }, None => return ERR!("Found SenderRefunded event, but it doesn't have tx_hash"), } } @@ -5051,10 +5046,10 @@ impl EthCoin { .await .retry_on_err() }) - .until_s(wait_until) - .repeat_every_secs(10.) - .await - .map_err(|_| WatcherRewardError::RPCError("Error getting the gas price".to_string()))?; + .until_s(wait_until) + .repeat_every_secs(10.) 
+ .await + .map_err(|_| WatcherRewardError::RPCError("Error getting the gas price".to_string()))?; let gas_cost_wei = calc_total_fee(U256::from(REWARD_GAS_AMOUNT), &pay_for_gas_option) .map_err(|e| WatcherRewardError::InternalError(e.to_string()))?; @@ -5072,10 +5067,10 @@ impl EthCoin { Err(e) => { error!("Error {} on eth_gasPrice request", e); None - } + }, } } - .boxed(); + .boxed(); let eth_fee_history_price_fut = async { match coin.eth_fee_history(U256::from(1u64), BlockNumber::Latest, &[]).await { @@ -5086,10 +5081,10 @@ impl EthCoin { Err(e) => { debug!("Error {} on eth_feeHistory request", e); None - } + }, } } - .boxed(); + .boxed(); let (eth_gas_price, eth_fee_history_price) = join(eth_gas_price_fut, eth_fee_history_price_fut).await; // on editions < 2021 the compiler will resolve array.into_iter() as (&array).into_iter() @@ -5119,7 +5114,7 @@ impl EthCoin { GasApiProvider::Infura => InfuraGasApiCaller::fetch_infura_fee_estimation(&gas_api_conf.url).boxed(), GasApiProvider::Blocknative => { BlocknativeGasApiCaller::fetch_blocknative_fee_estimation(&gas_api_conf.url).boxed() - } + }, }; provider_estimator_fut .or_else(|provider_estimator_err| { @@ -5143,7 +5138,7 @@ impl EthCoin { SwapTxFeePolicy::Internal => { let gas_price = coin.get_gas_price().await?; Ok(PayForGasOption::Legacy(LegacyGasPrice { gas_price })) - } + }, SwapTxFeePolicy::Low | SwapTxFeePolicy::Medium | SwapTxFeePolicy::High => { let fee_per_gas = coin.get_eip1559_gas_fee().await?; let pay_result = match swap_fee_policy { @@ -5161,7 +5156,7 @@ impl EthCoin { }), }; Ok(pay_result) - } + }, SwapTxFeePolicy::Unsupported => Err(MmError::new(Web3RpcError::Internal("swap fee policy not set".into()))), } } @@ -5187,10 +5182,10 @@ impl EthCoin { }, } }) - .until_ready() - .repeat_every_secs(1.) - .await - .ok(); + .until_ready() + .repeat_every_secs(1.) + .await + .ok(); } /// Returns `None` if the transaction hasn't appeared on the RPC nodes at the specified time. @@ -5243,7 +5238,7 @@ impl EthCoin { ); Timer::sleep(check_every).await; continue; - } + }, }; if let Some(receipt) = web3_receipt { @@ -5285,14 +5280,14 @@ impl EthCoin { if current_block >= block_number { break Ok(()); } - } + }, Err(e) => { error!( "Error {:?} getting the {} block number retrying in 15 seconds", e, selfi.ticker() ); - } + }, }; Timer::sleep(check_every).await; @@ -5316,7 +5311,7 @@ impl EthCoin { pub fn get_addr_nonce( self, addr: Address, - ) -> Box), Error=String> + Send> { + ) -> Box), Error = String> + Send> { const TMP_SOCKET_DURATION: Duration = Duration::from_secs(300); let fut = async move { @@ -5354,7 +5349,7 @@ impl EthCoin { Err(e) => { error!("Error getting nonce for addr {:?}: {}", addr, e); None - } + }, }) .collect(); if nonces.is_empty() { @@ -5408,9 +5403,9 @@ impl EthTxFeeDetails { // Using max_fee_per_gas as estimated gas_price value for compatibility in caller not expecting eip1559 fee per gas values. 
// Normally the caller should pay attention to presence of max_fee_per_gas and max_priority_fee_per_gas in the result: PayForGasOption::Eip1559(Eip1559FeePerGas { - max_fee_per_gas, - max_priority_fee_per_gas, - }) => (max_fee_per_gas, Some(max_fee_per_gas), Some(max_priority_fee_per_gas)), + max_fee_per_gas, + max_priority_fee_per_gas, + }) => (max_fee_per_gas, Some(max_fee_per_gas), Some(max_priority_fee_per_gas)), }; let gas_price = u256_to_big_decimal(gas_price, ETH_DECIMALS)?; let (max_fee_per_gas, max_priority_fee_per_gas) = match (max_fee_per_gas, max_priority_fee_per_gas) { @@ -5473,7 +5468,7 @@ impl MmCoin for EthCoin { EthAddressFormat::MixedCase => { let _addr = try_s!(addr_from_str(from)); Ok(checksum_address(from)) - } + }, } } @@ -5485,7 +5480,7 @@ impl MmCoin for EthCoin { } } - fn process_history_loop(&self, ctx: MmArc) -> Box + Send> { + fn process_history_loop(&self, ctx: MmArc) -> Box + Send> { cfg_wasm32! { ctx.log.log( "🤔", @@ -5510,7 +5505,7 @@ impl MmCoin for EthCoin { fn history_sync_status(&self) -> HistorySyncState { self.history_sync_state.lock().unwrap().clone() } - fn get_trade_fee(&self) -> Box + Send> { + fn get_trade_fee(&self) -> Box + Send> { let coin = self.clone(); Box::new( async move { @@ -5532,8 +5527,8 @@ impl MmCoin for EthCoin { paid_from_trading_vol: false, }) } - .boxed() - .compat(), + .boxed() + .compat(), ) } @@ -5555,13 +5550,13 @@ impl MmCoin for EthCoin { } else { U256::from(gas_limit::ETH_PAYMENT) } - } + }, EthCoinType::Erc20 { token_addr, .. } => { let mut gas = U256::from(gas_limit::ERC20_PAYMENT); let value = match value { TradePreimageValue::Exact(value) | TradePreimageValue::UpperBound(value) => { wei_from_big_decimal(&value, self.decimals)? - } + }, }; let allowed = self.allowance(self.swap_contract_address).compat().await?; if allowed < value { @@ -5582,7 +5577,7 @@ impl MmCoin for EthCoin { gas += U256::from(gas_limit::ERC20_SENDER_REFUND); // add 'senderRefund' gas if requested } gas - } + }, EthCoinType::Nft { .. } => return MmError::err(TradePreimageError::NftProtocolNotSupported), }; @@ -5644,7 +5639,7 @@ impl MmCoin for EthCoin { let function = ERC20_CONTRACT.function("transfer")?; let data = function.encode_input(&[Token::Address(to_addr), Token::Uint(dex_fee_amount)])?; (0.into(), data, token_addr, platform) - } + }, EthCoinType::Nft { .. } => return MmError::err(TradePreimageError::NftProtocolNotSupported), }; @@ -5760,7 +5755,7 @@ impl TryToAddress for Option { pub trait GuiAuthMessages { fn gui_auth_sign_message_hash(message: String) -> Option<[u8; 32]>; fn generate_gui_auth_signed_validation(generator: GuiAuthValidationGenerator) - -> SignatureResult; + -> SignatureResult; } impl GuiAuthMessages for EthCoin { @@ -5843,7 +5838,7 @@ fn validate_fee_impl(coin: EthCoin, validate_fee_args: EthValidateFeeArgs<'_>) - tx_from_rpc, expected_value ))); } - } + }, EthCoinType::Erc20 { platform: _, token_addr, @@ -5881,15 +5876,15 @@ fn validate_fee_impl(coin: EthCoin, validate_fee_args: EthValidateFeeArgs<'_>) - value, expected_value ))); } - } + }, _ => { return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( "Should have got uint token but got {:?}", value_input ))); - } + }, } - } + }, EthCoinType::Nft { .. 
} => return MmError::err(ValidatePaymentError::NftProtocolNotSupported), } @@ -6022,7 +6017,7 @@ fn signed_tx_from_web3_tx(transaction: Web3Transaction) -> Result { let gas_price = transaction .gas_price @@ -6037,7 +6032,7 @@ fn signed_tx_from_web3_tx(transaction: Web3Transaction) -> Result { let max_fee_per_gas = transaction .max_fee_per_gas @@ -6055,7 +6050,7 @@ fn signed_tx_from_web3_tx(transaction: Web3Transaction) -> Result return Err(ERRL!("Internal error: 'tx_type' invalid")), }; @@ -6194,7 +6189,7 @@ async fn get_max_eth_tx_type_conf(ctx: &MmArc, conf: &Json, coin_type: &EthCoinT _ => Ok(None), } } - } + }, } } @@ -6235,7 +6230,7 @@ pub async fn eth_coin_from_conf_and_request( let path_to_address = try_s!(json::from_value::>( req["path_to_address"].clone() )) - .unwrap_or_default(); + .unwrap_or_default(); let (key_pair, derivation_method) = try_s!(build_address_and_priv_key_policy(ctx, ticker, conf, priv_key_policy, &path_to_address, None).await); @@ -6264,18 +6259,18 @@ pub async fn eth_coin_from_conf_and_request( ctx.spawner().spawn_with_settings(fut, settings); Web3Transport::Websocket(websocket_transport) - } + }, Some("http") | Some("https") => { let node = HttpTransportNode { uri, gui_auth: false }; Web3Transport::new_http_with_event_handlers(node, event_handlers.clone()) - } + }, _ => { return ERR!( "Invalid node address '{}'. Only http(s) and ws(s) nodes are supported", uri ); - } + }, }; let web3 = Web3::new(transport); @@ -6285,7 +6280,7 @@ pub async fn eth_coin_from_conf_and_request( error!("Couldn't get client version for url {}: {}", url, e); continue; - } + }, }; web3_instances.push(Web3Instance { @@ -6319,7 +6314,7 @@ pub async fn eth_coin_from_conf_and_request( Some(d) => d as u8, }; (EthCoinType::Erc20 { platform, token_addr }, decimals) - } + }, CoinProtocol::NFT { platform } => (EthCoinType::Nft { platform }, ETH_DECIMALS), _ => return ERR!("Expect ETH, ERC20 or NFT protocol"), }; @@ -6462,16 +6457,16 @@ fn increase_gas_price_by_stage(pay_for_gas_option: PayForGasOption, level: &FeeA FeeApproxStage::WithoutApprox => gas_price, FeeApproxStage::StartSwap => { increase_by_percent_one_gwei(gas_price, GAS_PRICE_APPROXIMATION_PERCENT_ON_START_SWAP) - } + }, FeeApproxStage::OrderIssue => { increase_by_percent_one_gwei(gas_price, GAS_PRICE_APPROXIMATION_PERCENT_ON_ORDER_ISSUE) - } + }, FeeApproxStage::TradePreimage => { increase_by_percent_one_gwei(gas_price, GAS_PRICE_APPROXIMATION_PERCENT_ON_TRADE_PREIMAGE) - } + }, FeeApproxStage::WatcherPreimage => { increase_by_percent_one_gwei(gas_price, GAS_PRICE_APPROXIMATION_PERCENT_ON_WATCHER_PREIMAGE) - } + }, }; PayForGasOption::Legacy(LegacyGasPrice { gas_price: new_gas_price, @@ -6516,7 +6511,7 @@ pub async fn get_eth_address( } else { PrivKeyBuildPolicy::detect_priv_key_policy(ctx)? 
} - .into(); + .into(); let (_, derivation_method) = build_address_and_priv_key_policy(ctx, ticker, conf, priv_key_policy, path_to_address, None).await?; @@ -6526,7 +6521,7 @@ pub async fn get_eth_address( return Err(MmError::new(GetEthAddressError::UnexpectedDerivationMethod( UnexpectedDerivationMethod::UnsupportedError("HDWallet is not supported for NFT yet!".to_owned()), ))); - } + }, }; Ok(MyWalletAddress { @@ -6559,7 +6554,7 @@ fn get_valid_nft_addr_to_withdraw( return MmError::err(GetValidEthWithdrawAddError::CoinDoesntSupportNftWithdraw { coin: coin_enum.ticker().to_owned(), }); - } + }, }; let to_addr = valid_addr_from_str(to).map_err(GetValidEthWithdrawAddError::InvalidAddress)?; let token_addr = addr_from_str(token_add).map_err(GetValidEthWithdrawAddError::InvalidAddress)?; @@ -6609,12 +6604,12 @@ async fn get_eth_gas_details_from_withdraw_fee( Some(WithdrawFee::EthGas { gas_price, gas }) => { let gas_price = wei_from_big_decimal(&gas_price, ETH_GWEI_DECIMALS)?; return Ok((gas.into(), PayForGasOption::Legacy(LegacyGasPrice { gas_price }))); - } + }, Some(WithdrawFee::EthGasEip1559 { - max_fee_per_gas, - max_priority_fee_per_gas, - gas_option: gas_limit, - }) => { + max_fee_per_gas, + max_priority_fee_per_gas, + gas_option: gas_limit, + }) => { let max_fee_per_gas = wei_from_big_decimal(&max_fee_per_gas, ETH_GWEI_DECIMALS)?; let max_priority_fee_per_gas = wei_from_big_decimal(&max_priority_fee_per_gas, ETH_GWEI_DECIMALS)?; match gas_limit { @@ -6626,27 +6621,27 @@ async fn get_eth_gas_details_from_withdraw_fee( max_priority_fee_per_gas, }), )); - } + }, EthGasLimitOption::Calc => // go to gas estimate code - { - PayForGasOption::Eip1559(Eip1559FeePerGas { - max_fee_per_gas, - max_priority_fee_per_gas, - }) - } + { + PayForGasOption::Eip1559(Eip1559FeePerGas { + max_fee_per_gas, + max_priority_fee_per_gas, + }) + }, } - } + }, Some(fee_policy) => { let error = format!("Expected 'EthGas' fee type, found {:?}", fee_policy); return MmError::err(EthGasDetailsErr::InvalidFeePolicy(error)); - } + }, None => { // If WithdrawFee not set use legacy gas price (?) 
let gas_price = eth_coin.get_gas_price().await?; // go to gas estimate code PayForGasOption::Legacy(LegacyGasPrice { gas_price }) - } + }, }; // covering edge case by deducting the standard transfer fee when we want to max withdraw ETH @@ -6698,9 +6693,9 @@ fn tx_builder_with_pay_for_gas_option( let tx_builder = match *pay_for_gas_option { PayForGasOption::Legacy(LegacyGasPrice { gas_price }) => tx_builder.with_gas_price(gas_price), PayForGasOption::Eip1559(Eip1559FeePerGas { - max_priority_fee_per_gas, - max_fee_per_gas, - }) => tx_builder + max_priority_fee_per_gas, + max_fee_per_gas, + }) => tx_builder .with_priority_fee_per_gas(max_fee_per_gas, max_priority_fee_per_gas) .with_chain_id(eth_coin.chain_id), }; @@ -6726,9 +6721,9 @@ fn call_request_with_pay_for_gas_option(call_request: CallRequest, pay_for_gas_o ..call_request }, PayForGasOption::Eip1559(Eip1559FeePerGas { - max_fee_per_gas, - max_priority_fee_per_gas, - }) => CallRequest { + max_fee_per_gas, + max_priority_fee_per_gas, + }) => CallRequest { gas_price: None, max_fee_per_gas: Some(max_fee_per_gas), max_priority_fee_per_gas: Some(max_priority_fee_per_gas), @@ -6950,8 +6945,8 @@ impl GetNewAddressRpcOps for EthCoin { params: GetNewAddressParams, confirm_address: &ConfirmAddress, ) -> MmResult, GetNewAddressRpcError> - where - ConfirmAddress: HDConfirmAddress, + where + ConfirmAddress: HDConfirmAddress, { get_new_address::common_impl::get_new_address_rpc(self, params, confirm_address).await } @@ -7003,8 +6998,8 @@ impl InitCreateAccountRpcOps for EthCoin { state: CreateAccountState, xpub_extractor: Option, ) -> MmResult, CreateAccountRpcError> - where - XPubExtractor: HDXPubExtractor + Send, + where + XPubExtractor: HDXPubExtractor + Send, { init_create_account::common_impl::init_create_new_account_rpc(self, params, state, xpub_extractor).await } diff --git a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs index 6dd29f6e1e..b17a5a5cf4 100644 --- a/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs +++ b/mm2src/coins/z_coin/storage/blockdb/blockdb_sql_storage.rs @@ -69,7 +69,7 @@ impl BlockDbImpl { db_id, }) }) - .await + .await } #[cfg(test)] @@ -101,7 +101,7 @@ impl BlockDbImpl { db_id, }) }) - .await + .await } pub(crate) async fn get_latest_block(&self) -> ZcoinStorageRes { @@ -114,9 +114,9 @@ impl BlockDbImpl { |row| row.get(0), ) }) - .await - .map_to_mm(|err| ZcoinStorageError::DbError(err.to_string()))? - .unwrap_or(0)) + .await + .map_to_mm(|err| ZcoinStorageError::DbError(err.to_string()))? + .unwrap_or(0)) } pub(crate) async fn insert_block(&self, height: u32, cb_bytes: Vec) -> ZcoinStorageRes { @@ -131,7 +131,7 @@ impl BlockDbImpl { Ok(insert) }) - .await + .await } pub(crate) async fn rewind_to_height(&self, height: BlockHeight) -> ZcoinStorageRes { @@ -142,7 +142,7 @@ impl BlockDbImpl { .execute("DELETE from compactblocks WHERE height > ?1", [u32::from(height)]) .map_to_mm(|err| ZcoinStorageError::RemoveFromStorageErr(err.to_string())) }) - .await + .await } pub(crate) async fn get_earliest_block(&self) -> ZcoinStorageRes { @@ -155,10 +155,10 @@ impl BlockDbImpl { |row| row.get::<_, Option>(0), ) }) - .await - .map_to_mm(|err| ZcoinStorageError::GetFromStorageError(err.to_string()))? - .flatten() - .unwrap_or(0)) + .await + .map_to_mm(|err| ZcoinStorageError::GetFromStorageError(err.to_string()))? 
+ .flatten() + .unwrap_or(0)) } pub(crate) async fn query_blocks_by_limit( @@ -191,7 +191,7 @@ impl BlockDbImpl { Ok(rows.collect_vec()) }) - .await + .await } pub(crate) async fn process_blocks_with_mode( @@ -212,7 +212,7 @@ impl BlockDbImpl { opt.map(|(_, max)| max) .unwrap_or(BlockHeight::from_u32(params.sapling_activation_height) - 1) })? - } + }, }; let rows = self.query_blocks_by_limit(from_height, limit).await?; @@ -236,7 +236,7 @@ impl BlockDbImpl { match &mode.clone() { BlockProcessingMode::Validate => { validate_chain(block, &mut prev_height, &mut prev_hash).await?; - } + }, BlockProcessingMode::Scan(data, z_balance_change_sender) => { let tx_size = scan_cached_block(data, ¶ms, &block, &mut from_height).await?; // If there are transactions present in the current scanned block, @@ -246,7 +246,7 @@ impl BlockDbImpl { sender.send(()).await.expect("No receiver is available/dropped"); }; }; - } + }, } } Ok(()) From 5fdee3ec9f87365b6d8a39d517f01322fe8cafba Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 1 Jul 2024 15:28:42 +0100 Subject: [PATCH 165/186] Fix swap restart issue with multi-pubkey DBs --- mm2src/mm2_main/src/lp_swap.rs | 30 +++++++++++++++++-- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 8 +++++ .../src/lp_swap/recreate_swap_data.rs | 4 +++ mm2src/mm2_main/src/lp_swap/taker_swap.rs | 6 ++++ 4 files changed, 46 insertions(+), 2 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index d6f3c6de02..7cf463836f 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1538,9 +1538,24 @@ pub async fn swap_kick_starts(ctx: MmArc, db_id: Option<&str>) -> Result (swap.maker_db_id.as_deref(), swap.taker_db_id.as_deref()), + SavedSwap::Taker(swap) => (swap.maker_db_id.as_deref(), swap.taker_db_id.as_deref()), + }; let taker_coin = loop { match lp_coinfind(&ctx, &taker_coin_ticker).await { - Ok(Some(c)) => break c, + Ok(Some(c)) => { + if taker_db_id == c.account_db_id().await.as_deref() { + break c; + }; + info!( + "Can't kickstart the swap {} until the coin {} is activated with pubkey: {}", + swap.uuid(), + taker_coin_ticker, + taker_db_id.unwrap_or(&ctx.rmd160_hex()) + ); + Timer::sleep(5.).await; + }, Ok(None) => { info!( "Can't kickstart the swap {} until the coin {} is activated", @@ -1558,7 +1573,18 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker let maker_coin = loop { match lp_coinfind(&ctx, &maker_coin_ticker).await { - Ok(Some(c)) => break c, + Ok(Some(c)) => { + if maker_db_id == c.account_db_id().await.as_deref() { + break c; + }; + info!( + "Can't kickstart the swap {} until the coin {} is activated with pubkey: {}", + swap.uuid(), + maker_coin_ticker, + maker_db_id.unwrap_or(&ctx.rmd160_hex()) + ); + Timer::sleep(5.).await; + }, Ok(None) => { info!( "Can't kickstart the swap {} until the coin {} is activated", diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 9d01012036..5811f2ba88 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -108,6 +108,8 @@ async fn save_my_maker_swap_event( events: vec![], success_events: MAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: MAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), + maker_db_id: swap.maker_coin.account_db_id().await, + taker_db_id: swap.taker_coin.account_db_id().await, }), Err(e) => return ERR!("{}", e), }; @@ -1820,6 +1822,10 @@ pub struct MakerSavedSwap { 
pub mm_version: Option, pub success_events: Vec, pub error_events: Vec, + /// needed to validate if pending maker coin is activated with the correct `db_id` in `kickstart_thread_handler` + pub maker_db_id: Option, + /// needed to validate if pending taker coin is activated with the correct `db_id` in `kickstart_thread_handler` + pub taker_db_id: Option, } #[cfg(test)] @@ -1877,6 +1883,8 @@ impl MakerSavedSwap { mm_version: None, success_events: vec![], error_events: vec![], + maker_db_id: None, + taker_db_id: None, } } } diff --git a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs index 58801ec0b6..822217b9f8 100644 --- a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs +++ b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs @@ -94,6 +94,8 @@ fn recreate_maker_swap(ctx: MmArc, taker_swap: TakerSavedSwap) -> RecreateSwapRe mm_version: Some(ctx.mm_version.clone()), success_events: MAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: MAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), + taker_db_id: taker_swap.taker_db_id, + maker_db_id: taker_swap.maker_db_id, }; let mut event_it = taker_swap.events.into_iter(); @@ -296,6 +298,8 @@ async fn recreate_taker_swap(ctx: MmArc, maker_swap: MakerSavedSwap) -> Recreate mm_version: Some(ctx.mm_version.clone()), success_events: TAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: TAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), + taker_db_id: maker_swap.taker_db_id, + maker_db_id: maker_swap.maker_db_id, }; let mut event_it = maker_swap.events.into_iter(); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index 8600dc5c3f..b40043bdea 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -141,6 +141,8 @@ async fn save_my_taker_swap_event( TAKER_SUCCESS_EVENTS.iter().map(<&str>::to_string).collect() }, error_events: TAKER_ERROR_EVENTS.iter().map(<&str>::to_string).collect(), + taker_db_id: swap.taker_coin.account_db_id().await, + maker_db_id: swap.maker_coin.account_db_id().await, }), Err(e) => return ERR!("{}", e), }; @@ -218,6 +220,10 @@ pub struct TakerSavedSwap { pub mm_version: Option, pub success_events: Vec, pub error_events: Vec, + /// needed to validate if pending maker coin is activated with the correct `db_id` in `kickstart_thread_handler` + pub maker_db_id: Option, + /// needed to validate if pending taker coin is activated with the correct `db_id` in `kickstart_thread_handler` + pub taker_db_id: Option, } impl TakerSavedSwap { From 9a8494e02b5ebf2a40b66ffa9028d3d6a730ac71 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 1 Jul 2024 16:23:27 +0100 Subject: [PATCH 166/186] fix review notes --- mm2src/coins/eth/v2_activation.rs | 4 +- mm2src/coins/lightning/ln_utils.rs | 7 ++- mm2src/coins/lp_coins.rs | 2 +- mm2src/coins/nft.rs | 12 ++++- mm2src/coins/tendermint/tendermint_coin.rs | 2 +- mm2src/mm2_core/src/mm_ctx.rs | 4 +- mm2src/mm2_core/src/sql_connection_pool.rs | 50 ++++++------------- mm2src/mm2_db/src/indexed_db/db_lock.rs | 14 +++--- mm2src/mm2_main/src/lp_swap.rs | 14 +++--- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 12 ++--- .../src/lp_swap/recreate_swap_data.rs | 8 +-- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 8 +-- 12 files changed, 60 insertions(+), 77 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 9c580975ec..3692d75772 
100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -665,7 +665,7 @@ pub(crate) async fn build_address_and_priv_key_policy( let activated_key = KeyPair::from_secret_slice(raw_priv_key.as_slice()) .map_to_mm(|e| EthActivationV2Error::InternalError(e.to_string()))?; #[cfg(not(target_arch = "wasm32"))] - run_db_migraiton_for_new_eth_pubkey(ctx, &activated_key).await?; + run_db_migration_for_new_eth_pubkey(ctx, &activated_key).await?; let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); @@ -926,7 +926,7 @@ fn compress_public_key(uncompressed: H520) -> MmResult MmResult<(), EthActivationV2Error> { +async fn run_db_migration_for_new_eth_pubkey(ctx: &MmArc, keypair: &KeyPair) -> MmResult<(), EthActivationV2Error> { let db_id = hex::encode(dhash160(keypair.public().as_bytes())); let shared_db_id = shared_db_id_from_seed(&db_id) .mm_err(|err| EthActivationV2Error::InternalError(err.to_string()))? diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 9408724d3a..88918f776c 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -69,11 +69,10 @@ pub async fn init_persister( } pub async fn init_db(ctx: &MmArc, ticker: String, db_id: Option<&str>) -> EnableLightningResult { - let shared = ctx.sqlite_conn_opt(db_id).or_mm_err(|| { + let db = ctx.sqlite_conn_opt(db_id).or_mm_err(|| { EnableLightningError::DbError("'MmCtx::sqlite_connection' is not found or initialized".to_owned()) })?; - - let db = SqliteLightningDB::new(ticker, shared)?; + let db = SqliteLightningDB::new(ticker, db)?; if !db.is_db_initialized().await? { db.init_db().await?; @@ -145,7 +144,7 @@ pub async fn init_channel_manager( return MmError::err(EnableLightningError::UnsupportedMode( "Lightning network".into(), "electrum".into(), - )); + )) }, }; let best_header = get_best_header(&rpc_client).await?; diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 1c26151bb4..826cb38820 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -4619,7 +4619,7 @@ pub async fn find_unique_account_ids_active(ctx: &MmArc) -> Result Result, String> { // Using a HashSet to ensure uniqueness efficiently // Initialize with default wallet pubkey as coin.account_db_id() will return None by default. 
- let mut account_ids = HashSet::from([ctx.rmd160_hex()]); + let mut account_ids = HashSet::from([ctx.rmd160.to_string()]); let coin_ctx = try_s!(CoinsContext::from_ctx(ctx)); let coins = coin_ctx.coins.lock().await; diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index a7dfd84dc4..9a61c51f3e 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -1668,7 +1668,11 @@ pub async fn find_unique_nft_account_ids( for coin in coins.iter() { if coin.is_available() { // Use default if no db_id - let db_id = coin.inner.account_db_id().await.unwrap_or_else(|| ctx.rmd160_hex()); + let db_id = coin + .inner + .account_db_id() + .await + .unwrap_or_else(|| ctx.rmd160.to_string()); let entry = active_id_chains.entry(db_id).or_insert_with(Vec::new); if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { if chains.contains(&chain) { @@ -1689,7 +1693,11 @@ pub async fn find_nft_account_id_for_chain(ctx: &MmArc, chains: Chain) -> Result for coin in coins.iter() { if coin.is_available() { // Use default if no db_id - let db_id = coin.inner.account_db_id().await.unwrap_or_else(|| ctx.rmd160_hex()); + let db_id = coin + .inner + .account_db_id() + .await + .unwrap_or_else(|| ctx.rmd160.to_string()); if let Ok(chain) = Chain::from_ticker(coin.inner.ticker()) { if chains == chain { return Ok(Some((db_id, chain))); diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 6c8ac8dd30..a0bd1ce829 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2311,7 +2311,7 @@ impl MmCoin for TendermintCoin { async fn account_db_id(&self) -> Option { if let Ok(public_key) = self.activation_policy.public_key() { let address_hash = dhash160(&public_key.to_bytes()); - let address_rmd160_hex = hex::encode(address_hash.as_slice()); + let address_rmd160_hex = address_hash.to_string(); return Some(address_rmd160_hex); }; diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 70c6ad2b98..177d973391 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -201,8 +201,6 @@ impl MmCtx { self.rmd160.or(&|| &*DEFAULT) } - pub fn rmd160_hex(&self) -> String { hex::encode(self.rmd160().as_slice()) } - pub fn shared_db_id(&self) -> &H160 { lazy_static! { static ref DEFAULT: H160 = [0; 20].into(); @@ -305,7 +303,7 @@ impl MmCtx { /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. 
#[cfg(not(target_arch = "wasm32"))] pub fn dbdir(&self, db_id: Option<&str>) -> PathBuf { - let db_id = db_id.map(|t| t.to_owned()).unwrap_or_else(|| self.rmd160_hex()); + let db_id = db_id.map(|t| t.to_owned()).unwrap_or_else(|| self.rmd160.to_string()); path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 1a20c49ad0..539ff4dc17 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -65,8 +65,8 @@ impl SqliteConnPool { let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, - rmd160_hex: ctx.rmd160_hex(), - shared_db_id: hex::encode(*ctx.shared_db_id()), + rmd160_hex: ctx.rmd160.to_string(), + shared_db_id: ctx.shared_db_id().to_string(), db_root: db_root.map(|d| d.to_owned()) })); @@ -95,8 +95,8 @@ impl SqliteConnPool { let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, - rmd160_hex: ctx.rmd160_hex(), - shared_db_id: hex::encode(*ctx.shared_db_id()), + rmd160_hex: ctx.rmd160.to_string(), + shared_db_id: ctx.shared_db_id().to_string(), db_root: db_root.map(|d| d.to_owned()) })); @@ -130,44 +130,22 @@ impl SqliteConnPool { connection } - /// Retrieves a single-user connection from the pool. + /// Run a sql query for db. pub fn run_sql_query(&self, db_id: Option<&str>, f: F) -> R where F: FnOnce(MutexGuard) -> R + Send + 'static, R: Send + 'static, { - self.run_sql_query_impl(db_id, DbIdConnKind::Single, f) + f(self.sqlite_conn_impl(db_id, DbIdConnKind::Single).lock().unwrap()) } - /// Retrieves a shared connection from the pool. + /// Run a sql query for shared_db. pub fn run_sql_query_shared(&self, db_id: Option<&str>, f: F) -> R where F: FnOnce(MutexGuard) -> R + Send + 'static, R: Send + 'static, { - self.run_sql_query_impl(db_id, DbIdConnKind::Shared, f) - } - - /// Internal run a sql query. - fn run_sql_query_impl(&self, db_id: Option<&str>, kind: DbIdConnKind, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, - { - let db_id = self.db_id(db_id, &kind); - let connections = self.connections.read().unwrap(); - if let Some(connection) = connections.get(&db_id) { - let conn = connection.lock().unwrap(); - return f(conn); - } - drop(connections); - - let mut connections = self.connections.write().unwrap(); - let sqlite_file_path = self.sqlite_file_path(&db_id, &kind); - let connection = Self::open_connection(sqlite_file_path); - connections.insert(db_id, Arc::clone(&connection)); - - f(connection.lock().unwrap()) + f(self.sqlite_conn_impl(db_id, DbIdConnKind::Shared).lock().unwrap()) } pub fn add_test_db(&self, db_id: String) { @@ -198,8 +176,8 @@ impl SqliteConnPool { match kind { DbIdConnKind::Shared => db_id .map(|e| e.to_owned()) - .unwrap_or_else(|| hex::encode(ctx.shared_db_id().as_slice())), - DbIdConnKind::Single => db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()), + .unwrap_or_else(|| ctx.shared_db_id().to_string()), + DbIdConnKind::Single => db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()), } } fn sqlite_file_path(&self, db_id: &str, kind: &DbIdConnKind) -> PathBuf { @@ -221,7 +199,7 @@ pub struct AsyncSqliteConnPool { impl AsyncSqliteConnPool { /// Initialize a database connection. 
pub async fn init(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { let conn = Self::open_connection(&pool.sqlite_file_path).await; @@ -237,7 +215,7 @@ impl AsyncSqliteConnPool { try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path, - rmd160_hex: ctx.rmd160_hex(), + rmd160_hex: ctx.rmd160.to_string(), })); Ok(()) @@ -245,7 +223,7 @@ impl AsyncSqliteConnPool { /// Initialize a database connection. pub async fn init_test(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160_hex()); + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { let mut pool = pool.connections.write().await; @@ -263,7 +241,7 @@ impl AsyncSqliteConnPool { try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path: PathBuf::new(), - rmd160_hex: ctx.rmd160_hex(), + rmd160_hex: ctx.rmd160.to_string(), })); Ok(()) } diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index ce3f5f64a7..386595ffab 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -27,16 +27,16 @@ impl ConstructibleDb { /// Creates a new uninitialized `Db` instance from other Iguana and/or HD accounts. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { - let db_id_ = hex::encode(ctx.rmd160().as_slice()); - let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); + let default_db_id = ctx.rmd160().to_string(); + let shared_db_id = ctx.shared_db_id().to_string(); - let db_id = db_id.unwrap_or(&db_id_); + let db_id = db_id.unwrap_or(&default_db_id).to_string(); let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { locks: Arc::new(RwLock::new(conns)), db_namespace: ctx.db_namespace, - db_id: db_id.to_string(), + db_id, shared_db_id, } } @@ -46,7 +46,7 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc) -> Self { let db_id = hex::encode(ctx.rmd160().as_slice()); - let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); + let shared_db_id = ctx.shared_db_id().to_string(); let conns = HashMap::from([(shared_db_id.clone(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { locks: Arc::new(RwLock::new(conns)), @@ -59,8 +59,8 @@ impl ConstructibleDb { /// Creates a new uninitialized `Db` instance shared between all wallets/seed. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. 
pub fn new_global_db(ctx: &MmArc) -> Self { - let db_id = hex::encode(ctx.rmd160().as_slice()); - let shared_db_id = hex::encode(ctx.shared_db_id().as_slice()); + let db_id = ctx.rmd160().to_string(); + let shared_db_id = ctx.shared_db_id().to_string(); ConstructibleDb { locks: Arc::new(RwLock::new(HashMap::default())), db_namespace: ctx.db_namespace, diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index 7cf463836f..c1467c8674 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1538,21 +1538,21 @@ pub async fn swap_kick_starts(ctx: MmArc, db_id: Option<&str>) -> Result (swap.maker_db_id.as_deref(), swap.taker_db_id.as_deref()), - SavedSwap::Taker(swap) => (swap.maker_db_id.as_deref(), swap.taker_db_id.as_deref()), + let (maker_coin_db_id, taker_coin_db_id) = match &swap { + SavedSwap::Maker(swap) => (swap.maker_coin_db_id.as_deref(), swap.taker_coin_db_id.as_deref()), + SavedSwap::Taker(swap) => (swap.maker_coin_db_id.as_deref(), swap.taker_coin_db_id.as_deref()), }; let taker_coin = loop { match lp_coinfind(&ctx, &taker_coin_ticker).await { Ok(Some(c)) => { - if taker_db_id == c.account_db_id().await.as_deref() { + if taker_coin_db_id == c.account_db_id().await.as_deref() { break c; }; info!( "Can't kickstart the swap {} until the coin {} is activated with pubkey: {}", swap.uuid(), taker_coin_ticker, - taker_db_id.unwrap_or(&ctx.rmd160_hex()) + taker_coin_db_id.unwrap_or(&ctx.rmd160.to_string()) ); Timer::sleep(5.).await; }, @@ -1574,14 +1574,14 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker let maker_coin = loop { match lp_coinfind(&ctx, &maker_coin_ticker).await { Ok(Some(c)) => { - if maker_db_id == c.account_db_id().await.as_deref() { + if maker_coin_db_id == c.account_db_id().await.as_deref() { break c; }; info!( "Can't kickstart the swap {} until the coin {} is activated with pubkey: {}", swap.uuid(), maker_coin_ticker, - maker_db_id.unwrap_or(&ctx.rmd160_hex()) + maker_coin_db_id.unwrap_or(&ctx.rmd160.to_string()) ); Timer::sleep(5.).await; }, diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 5811f2ba88..09f702ca0e 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -108,8 +108,8 @@ async fn save_my_maker_swap_event( events: vec![], success_events: MAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: MAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), - maker_db_id: swap.maker_coin.account_db_id().await, - taker_db_id: swap.taker_coin.account_db_id().await, + maker_coin_db_id: swap.maker_coin.account_db_id().await, + taker_coin_db_id: swap.taker_coin.account_db_id().await, }), Err(e) => return ERR!("{}", e), }; @@ -1823,9 +1823,9 @@ pub struct MakerSavedSwap { pub success_events: Vec, pub error_events: Vec, /// needed to validate if pending maker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub maker_db_id: Option, + pub maker_coin_db_id: Option, /// needed to validate if pending taker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub taker_db_id: Option, + pub taker_coin_db_id: Option, } #[cfg(test)] @@ -1883,8 +1883,8 @@ impl MakerSavedSwap { mm_version: None, success_events: vec![], error_events: vec![], - maker_db_id: None, - taker_db_id: None, + maker_coin_db_id: None, + taker_coin_db_id: None, } } } diff --git a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs 
b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs index 822217b9f8..8635d7a34c 100644 --- a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs +++ b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs @@ -94,8 +94,8 @@ fn recreate_maker_swap(ctx: MmArc, taker_swap: TakerSavedSwap) -> RecreateSwapRe mm_version: Some(ctx.mm_version.clone()), success_events: MAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: MAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), - taker_db_id: taker_swap.taker_db_id, - maker_db_id: taker_swap.maker_db_id, + taker_coin_db_id: taker_swap.taker_coin_db_id, + maker_coin_db_id: taker_swap.maker_coin_db_id, }; let mut event_it = taker_swap.events.into_iter(); @@ -298,8 +298,8 @@ async fn recreate_taker_swap(ctx: MmArc, maker_swap: MakerSavedSwap) -> Recreate mm_version: Some(ctx.mm_version.clone()), success_events: TAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: TAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), - taker_db_id: maker_swap.taker_db_id, - maker_db_id: maker_swap.maker_db_id, + taker_coin_db_id: maker_swap.taker_coin_db_id, + maker_coin_db_id: maker_swap.maker_coin_db_id, }; let mut event_it = maker_swap.events.into_iter(); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index b40043bdea..a2f249a26f 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -141,8 +141,8 @@ async fn save_my_taker_swap_event( TAKER_SUCCESS_EVENTS.iter().map(<&str>::to_string).collect() }, error_events: TAKER_ERROR_EVENTS.iter().map(<&str>::to_string).collect(), - taker_db_id: swap.taker_coin.account_db_id().await, - maker_db_id: swap.maker_coin.account_db_id().await, + taker_coin_db_id: swap.taker_coin.account_db_id().await, + maker_coin_db_id: swap.maker_coin.account_db_id().await, }), Err(e) => return ERR!("{}", e), }; @@ -221,9 +221,9 @@ pub struct TakerSavedSwap { pub success_events: Vec, pub error_events: Vec, /// needed to validate if pending maker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub maker_db_id: Option, + pub maker_coin_db_id: Option, /// needed to validate if pending taker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub taker_db_id: Option, + pub taker_coin_db_id: Option, } impl TakerSavedSwap { From 7c12cce45acde07bf5a144255066122553ea1433 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 1 Jul 2024 16:43:49 +0100 Subject: [PATCH 167/186] revert Cargo.toml --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 34dd176a09..384e5b1336 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,3 +71,6 @@ debug-assertions = false panic = 'unwind' incremental = true codegen-units = 256 + +[profile.release.package.mocktopus] +opt-level = 1 # TODO: MIR fails on optimizing this dependency, remove that.. 
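Note on the kickstart changes in the two patches above: a saved swap now records, per leg, the account DB id it was created under (`maker_coin_db_id` / `taker_coin_db_id`), and `kickstart_thread_handler` refuses to resume the swap until each coin is activated with that same id. The sketch below captures that retry loop in isolation; `CoinHandle`, `SavedLeg`, `wait_for_leg` and the `lookup` closure are illustrative stand-ins (the real code is async and uses `lp_coinfind`, `MmCoinEnum::account_db_id` and `Timer::sleep`), so read it as a model of the pattern, not code from these patches.

use std::{thread, time::Duration};

/// Illustrative stand-in for an activated coin (the real code uses `MmCoinEnum`).
struct CoinHandle {
    /// DB id derived from the pubkey the coin was activated with (`None` = default wallet).
    account_db_id: Option<String>,
}

impl CoinHandle {
    fn account_db_id(&self) -> Option<&str> { self.account_db_id.as_deref() }
}

/// One leg of a saved swap: the ticker plus the `maker_coin_db_id` / `taker_coin_db_id`
/// value that was recorded when the swap file was written.
struct SavedLeg {
    ticker: String,
    saved_db_id: Option<String>,
}

/// Poll `lookup` (a stand-in for `lp_coinfind`) until the leg's coin is activated under
/// the same account DB id the swap was saved with, mirroring the retry loop in
/// `kickstart_thread_handler`. The real implementation is async and sleeps via
/// `Timer::sleep(5.)`; this sketch blocks the current thread instead.
fn wait_for_leg<F>(leg: &SavedLeg, default_db_id: &str, mut lookup: F) -> CoinHandle
where
    F: FnMut(&str) -> Option<CoinHandle>,
{
    loop {
        match lookup(leg.ticker.as_str()) {
            // Activated with the matching account id: safe to resume the swap with it.
            Some(coin) if coin.account_db_id() == leg.saved_db_id.as_deref() => return coin,
            // Activated, but under a different pubkey / DB id: keep waiting.
            Some(_) => println!(
                "Can't kickstart until {} is activated with pubkey: {}",
                leg.ticker,
                leg.saved_db_id.as_deref().unwrap_or(default_db_id),
            ),
            // Not activated at all yet.
            None => println!("Can't kickstart until {} is activated", leg.ticker),
        }
        thread::sleep(Duration::from_secs(5));
    }
}

Storing the id per coin matters because, with multi-pubkey databases, the maker and taker coins of a single swap can be activated under different pubkeys; comparing each leg against its own recorded id (`None` on both sides denotes the default wallet, since `account_db_id()` returns `None` by default) keeps a restarted node from resuming a swap against the wrong account's database.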
From 0b4d808e59901cecc7ed4ca46a817c1cada24efd Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 4 Jul 2024 13:20:59 +0100 Subject: [PATCH 168/186] fix shared_db_id derivation and review notes --- mm2src/coins/eth/v2_activation.rs | 28 +++---- mm2src/coins/tendermint/tendermint_coin.rs | 5 +- .../utxo/utxo_builder/utxo_arc_builder.rs | 38 ---------- .../utxo/utxo_builder/utxo_coin_builder.rs | 38 +++++++++- mm2src/coins/utxo/utxo_common.rs | 5 +- .../src/tendermint_with_assets_activation.rs | 75 ++++++++----------- mm2src/crypto/src/global_hd_ctx.rs | 5 ++ mm2src/mm2_core/src/mm_ctx.rs | 11 ++- mm2src/mm2_core/src/sql_connection_pool.rs | 35 ++++----- mm2src/mm2_db/src/indexed_db/db_lock.rs | 6 +- mm2src/mm2_main/src/lp_native_dex.rs | 20 +++-- .../src/rpc/lp_commands/lp_commands.rs | 2 +- 12 files changed, 129 insertions(+), 139 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 3692d75772..2537ad0e5e 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -664,12 +664,16 @@ pub(crate) async fn build_address_and_priv_key_policy( .mm_err(|e| EthActivationV2Error::InternalError(e.to_string()))?; let activated_key = KeyPair::from_secret_slice(raw_priv_key.as_slice()) .map_to_mm(|e| EthActivationV2Error::InternalError(e.to_string()))?; + let hd_wallet_rmd160 = global_hd_ctx.derive_rmd160(); #[cfg(not(target_arch = "wasm32"))] - run_db_migration_for_new_eth_pubkey(ctx, &activated_key).await?; + { + let db_id = dhash160(activated_key.public().as_bytes()); + run_db_migration_for_new_eth_pubkey(ctx, Some(db_id.to_string()), Some(hd_wallet_rmd160.to_string())) + .await?; + } let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); - let hd_wallet_rmd160 = dhash160(activated_key.public().as_bytes()); let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) .await .mm_err(EthActivationV2Error::from)?; @@ -926,11 +930,13 @@ fn compress_public_key(uncompressed: H520) -> MmResult MmResult<(), EthActivationV2Error> { - let db_id = hex::encode(dhash160(keypair.public().as_bytes())); - let shared_db_id = shared_db_id_from_seed(&db_id) - .mm_err(|err| EthActivationV2Error::InternalError(err.to_string()))? 
- .to_string(); +async fn run_db_migration_for_new_eth_pubkey( + ctx: &MmArc, + db_id: Option, + shared_db_id: Option, +) -> MmResult<(), EthActivationV2Error> { + info!("Public key hash: {db_id:?}"); + info!("Shared Database ID: {shared_db_id:?}"); let db_migration_sender = ctx .db_migration_watcher @@ -939,16 +945,10 @@ async fn run_db_migration_for_new_eth_pubkey(ctx: &MmArc, keypair: &KeyPair) -> .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender - .send(DbIds { - db_id: db_id.clone(), - shared_db_id: shared_db_id.clone(), - }) + .send(DbIds { db_id, shared_db_id }) .await .map_to_mm(|err| EthActivationV2Error::InternalError(err.to_string()))?; - debug!("Public key hash: {db_id}"); - debug!("Shared Database ID: {shared_db_id}"); - Ok(()) } diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index a0bd1ce829..204943167c 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -109,6 +109,7 @@ const ACCOUNT_SEQUENCE_ERR: &str = "incorrect account sequence"; type TendermintPrivKeyPolicy = PrivKeyPolicy; +#[derive(Clone)] pub struct TendermintKeyPair { private_key_secret: Secp256k1Secret, public_key: Public, @@ -2953,7 +2954,7 @@ impl WatcherOps for TendermintCoin { pub fn tendermint_priv_key_policy( conf: &TendermintConf, ticker: &str, - priv_key_build_policy: PrivKeyBuildPolicy, + priv_key_build_policy: &PrivKeyBuildPolicy, path_to_address: HDPathAccountToAddressId, ) -> MmResult { match priv_key_build_policy { @@ -2963,7 +2964,7 @@ pub fn tendermint_priv_key_policy( kind: TendermintInitErrorKind::Internal(e.to_string()), })?; - let tendermint_pair = TendermintKeyPair::new(iguana, *mm2_internal_key_pair.public()); + let tendermint_pair = TendermintKeyPair::new(*iguana, *mm2_internal_key_pair.public()); Ok(TendermintPrivKeyPolicy::Iguana(tendermint_pair)) }, diff --git a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs index 301805d3a1..d0f2a3a19a 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs @@ -11,14 +11,8 @@ use async_trait::async_trait; use chain::{BlockHeader, TransactionOutput}; use common::executor::{AbortSettings, SpawnAbortable, Timer}; use common::log::{debug, error, info, warn}; -#[cfg(not(target_arch = "wasm32"))] -use crypto::shared_db_id::shared_db_id_from_seed; use futures::compat::Future01CompatExt; -#[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; -#[cfg(not(target_arch = "wasm32"))] use keys::hash::H160; use mm2_core::mm_ctx::MmArc; -#[cfg(not(target_arch = "wasm32"))] -use mm2_core::sql_connection_pool::DbIds; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; #[cfg(test)] use mocktopus::macros::*; @@ -139,11 +133,6 @@ where } } - #[cfg(not(target_arch = "wasm32"))] - if let Some(hd) = utxo_arc.derivation_method.hd_wallet() { - run_db_migraiton_for_new_utxo_pubkey(self.ctx, hd.inner.hd_wallet_rmd160).await? 
- } - Ok(result_coin) } } @@ -708,30 +697,3 @@ fn spawn_block_header_utxo_loop( .weak_spawner() .spawn_with_settings(fut, settings); } - -#[cfg(not(target_arch = "wasm32"))] -async fn run_db_migraiton_for_new_utxo_pubkey(ctx: &MmArc, pubkey: H160) -> MmResult<(), UtxoCoinBuildError> { - let db_id = hex::encode(pubkey.as_slice()); - let shared_db_id = shared_db_id_from_seed(&db_id) - .mm_err(|err| UtxoCoinBuildError::Internal(err.to_string()))? - .to_string(); - - let db_migration_sender = ctx - .db_migration_watcher - .as_option() - .expect("Db migration watcher isn't intialized yet!") - .get_sender(); - let mut db_migration_sender = db_migration_sender.lock().await; - db_migration_sender - .send(DbIds { - db_id: db_id.clone(), - shared_db_id: shared_db_id.clone(), - }) - .await - .map_to_mm(|err| UtxoCoinBuildError::Internal(err.to_string()))?; - - debug!("Public key hash: {db_id}"); - debug!("Shared Database ID: {shared_db_id}"); - - Ok(()) -} diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 56c29f7a9b..27a51331b1 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -24,6 +24,7 @@ use derive_more::Display; use futures::channel::mpsc::{channel, unbounded, Receiver as AsyncReceiver, UnboundedReceiver, UnboundedSender}; use futures::compat::Future01CompatExt; use futures::lock::Mutex as AsyncMutex; +#[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use futures::StreamExt; use keys::bytes::Bytes; pub use keys::{Address, AddressBuilder, AddressFormat as UtxoAddressFormat, AddressHashEnum, AddressScriptType, @@ -219,7 +220,7 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { }; let address_format = self.address_format()?; - let hd_wallet_rmd160 = activated_key_pair.public().address_hash(); + let hd_wallet_rmd160 = global_hd_ctx.derive_rmd160(); let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(self.ctx(), self.ticker().to_owned(), hd_wallet_rmd160).await?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, path_to_coin) @@ -237,6 +238,14 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { }, address_format, }; + #[cfg(not(target_arch = "wasm32"))] + { + // db_id should be the current activated key for this hd wallet + let db_id = Some(activated_key_pair.public().address_hash()); + // device_rmd_160 is unqiue to a device, hence it can bs used as shared_db_id. + let shared_db_id = Some(hd_wallet_rmd160); + run_db_migration_for_new_utxo_pubkey(self.ctx(), db_id, shared_db_id).await? 
+ } let derivation_method = DerivationMethod::HDWallet(hd_wallet); build_utxo_coin_fields_with_conf_and_policy(self, conf, priv_key_policy, derivation_method).await } @@ -1011,3 +1020,30 @@ async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Resul // Flatten `Result< Result<(), String>, String >` .flatten() } + +#[cfg(not(target_arch = "wasm32"))] +pub async fn run_db_migration_for_new_utxo_pubkey( + ctx: &MmArc, + db_id: Option, + shared_db_id: Option, +) -> MmResult<(), UtxoCoinBuildError> { + use mm2_core::sql_connection_pool::DbIds; + + let db_id = db_id.map(|id| id.to_string()); + let shared_db_id = shared_db_id.map(|id| id.to_string()); + info!("Public key hash: {db_id:?}"); + info!("Shared Database ID: {shared_db_id:?}"); + + let db_migration_sender = ctx + .db_migration_watcher + .as_option() + .expect("Db migration watcher isn't intialized yet!") + .get_sender(); + let mut db_migration_sender = db_migration_sender.lock().await; + db_migration_sender + .send(DbIds { db_id, shared_db_id }) + .await + .map_to_mm(|err| UtxoCoinBuildError::Internal(err.to_string()))?; + + Ok(()) +} diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index 91c1c00598..9d5e84dcd1 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -5147,10 +5147,13 @@ where { if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { // we can use hd_wallet_rmd160 as our shared_db_id since it's unique to a device - return hd_wallet + let db_id = hd_wallet .get_enabled_address() .await .map(|addr| hex::encode(addr.pubkey().address_hash().as_slice())); + println!("enable_address: {:?}", db_id); + + return db_id; } None diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index 86ed3376ef..b891892923 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -18,10 +18,8 @@ use coins::tendermint::{tendermint_priv_key_policy, TendermintActivationPolicy, #[cfg(not(target_arch = "wasm32"))] use coins::utxo::dhash160; use coins::{CoinBalance, CoinProtocol, MarketCoinOps, MmCoin, MmCoinEnum, PrivKeyBuildPolicy}; use common::executor::{AbortSettings, SpawnAbortable}; -#[cfg(not(target_arch = "wasm32"))] use common::log::debug; +#[cfg(not(target_arch = "wasm32"))] use common::log::info; use common::{true_f, Future01CompatExt}; -#[cfg(not(target_arch = "wasm32"))] -use crypto::shared_db_id::shared_db_id_from_seed; #[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use mm2_core::mm_ctx::MmArc; #[cfg(not(target_arch = "wasm32"))] @@ -249,7 +247,11 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { kind: TendermintInitErrorKind::CantUseWatchersWithPubkeyPolicy, }); } - run_db_migraiton_for_new_tendermint_pubkey(&ctx, pubkey, ticker.clone()).await?; + #[cfg(not(target_arch = "wasm32"))] + { + let db_id = dhash160(&pubkey.to_bytes()).to_string(); + run_db_migration_for_new_tendermint_pubkey(&ctx, Some(db_id), None, ticker.clone()).await?; + } TendermintActivationPolicy::with_public_key(pubkey) } else { @@ -258,27 +260,29 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { ticker: ticker.clone(), kind: TendermintInitErrorKind::Internal(e.to_string()), })?; - if let PrivKeyBuildPolicy::GlobalHDAccount(_) = private_key_policy { - let tendermint_private_key_policy = tendermint_priv_key_policy( - &conf, - &ticker, - 
private_key_policy.clone(), - activation_request.path_to_address, - )?; - - let result = TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy); + + let tendermint_private_key_policy = + tendermint_priv_key_policy(&conf, &ticker, &private_key_policy, activation_request.path_to_address)?; + + #[cfg(not(target_arch = "wasm32"))] + if let PrivKeyBuildPolicy::GlobalHDAccount(_hd) = &private_key_policy { + let result = TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy.clone()); let pubkey = result.public_key().map_to_mm(|e| TendermintInitError { ticker: ticker.clone(), kind: TendermintInitErrorKind::Internal(e.to_string()), })?; - run_db_migraiton_for_new_tendermint_pubkey(&ctx, pubkey, ticker.clone()).await?; + let db_id = dhash160(&pubkey.to_bytes()); + let shared_db_id = _hd.derive_rmd160(); + run_db_migration_for_new_tendermint_pubkey( + &ctx, + Some(db_id.to_string()), + Some(shared_db_id.to_string()), + ticker.clone(), + ) + .await?; + }; - result - } else { - let tendermint_private_key_policy = - tendermint_priv_key_policy(&conf, &ticker, private_key_policy, activation_request.path_to_address)?; - TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy) - } + TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy) }; TendermintCoin::init( @@ -409,18 +413,14 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { } #[cfg(not(target_arch = "wasm32"))] -async fn run_db_migraiton_for_new_tendermint_pubkey( +async fn run_db_migration_for_new_tendermint_pubkey( ctx: &MmArc, - pubkey: TendermintPublicKey, + db_id: Option, + shared_db_id: Option, ticker: String, ) -> MmResult<(), TendermintInitError> { - let db_id = hex::encode(dhash160(pubkey.to_bytes().as_slice())); - let shared_db_id = shared_db_id_from_seed(&pubkey.to_hex()) - .mm_err(|err| TendermintInitError { - ticker: ticker.to_string(), - kind: TendermintInitErrorKind::Internal(err.to_string()), - })? 
- .to_string(); + info!("Public key hash: {db_id:?}"); + info!("Shared Database ID: {shared_db_id:?}"); let db_migration_sender = ctx .db_migration_watcher @@ -429,27 +429,12 @@ async fn run_db_migraiton_for_new_tendermint_pubkey( .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender - .send(DbIds { - db_id: db_id.clone(), - shared_db_id: shared_db_id.clone(), - }) + .send(DbIds { db_id, shared_db_id }) .await .map_to_mm(|err| TendermintInitError { ticker: ticker.to_string(), kind: TendermintInitErrorKind::Internal(err.to_string()), })?; - debug!("Public key hash: {db_id}"); - debug!("Shared Database ID: {shared_db_id}"); - - Ok(()) -} - -#[cfg(target_arch = "wasm32")] -async fn run_db_migraiton_for_new_tendermint_pubkey( - _ctx: &MmArc, - _pubkey: TendermintPublicKey, - _ticker: String, -) -> MmResult<(), TendermintInitError> { Ok(()) } diff --git a/mm2src/crypto/src/global_hd_ctx.rs b/mm2src/crypto/src/global_hd_ctx.rs index ad7c2bc63b..02787b21ff 100644 --- a/mm2src/crypto/src/global_hd_ctx.rs +++ b/mm2src/crypto/src/global_hd_ctx.rs @@ -1,9 +1,11 @@ use crate::privkey::{bip39_seed_from_passphrase, key_pair_from_secret, PrivKeyError}; use crate::{mm2_internal_der_path, Bip32Error, CryptoInitError, CryptoInitResult}; use bip32::{DerivationPath, ExtendedPrivateKey}; +use bitcrypto::dhash160; use common::drop_mutability; use keys::{KeyPair, Secret as Secp256k1Secret}; use mm2_err_handle::prelude::*; +use primitives::hash::{H160, H256}; use std::ops::Deref; use std::sync::Arc; use zeroize::{Zeroize, ZeroizeOnDrop}; @@ -74,6 +76,9 @@ impl GlobalHDAccountCtx { pub fn derive_secp256k1_secret(&self, derivation_path: &DerivationPath) -> MmResult { derive_secp256k1_secret(self.bip39_secp_priv_key.clone(), derivation_path) } + + /// Derives a unique identifier (RMD160 hash of the root public key) for the device. + pub fn derive_rmd160(&self) -> H160 { dhash160(H256::from(self.root_seed_bytes()).as_slice()) } } pub fn derive_secp256k1_secret( diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 177d973391..d32aa2cd94 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -201,7 +201,11 @@ impl MmCtx { self.rmd160.or(&|| &*DEFAULT) } - pub fn shared_db_id(&self) -> &H160 { + pub fn default_db_id(&self) -> String { self.rmd160().to_string() } + + pub fn db_id_or_default(&self, db_id: Option<&str>) -> String { db_id.unwrap_or(&self.default_db_id()).to_owned() } + + pub fn default_shared_db_id(&self) -> &H160 { lazy_static! { static ref DEFAULT: H160 = [0; 20].into(); } @@ -303,8 +307,7 @@ impl MmCtx { /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. #[cfg(not(target_arch = "wasm32"))] pub fn dbdir(&self, db_id: Option<&str>) -> PathBuf { - let db_id = db_id.map(|t| t.to_owned()).unwrap_or_else(|| self.rmd160.to_string()); - path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) + path_to_dbdir(self.conf["dbdir"].as_str(), &self.db_id_or_default(db_id)) } /// MM shared database path. 
@@ -319,7 +322,7 @@ impl MmCtx { pub fn shared_dbdir(&self, db_id: Option<&str>) -> PathBuf { let db_id = db_id .map(|d| d.to_owned()) - .unwrap_or_else(|| hex::encode(self.shared_db_id().as_slice())); + .unwrap_or_else(|| hex::encode(self.default_shared_db_id().as_slice())); path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 539ff4dc17..3cf62445b1 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -25,7 +25,7 @@ enum DbIdConnKind { pub struct SqliteConnPool { connections: Arc>>>>, // default db_id - rmd160_hex: String, + default_db_id: String, // default shared_db_id shared_db_id: String, db_root: Option, @@ -65,8 +65,8 @@ impl SqliteConnPool { let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, - rmd160_hex: ctx.rmd160.to_string(), - shared_db_id: ctx.shared_db_id().to_string(), + default_db_id: ctx.rmd160.to_string(), + shared_db_id: ctx.default_shared_db_id().to_string(), db_root: db_root.map(|d| d.to_owned()) })); @@ -95,8 +95,8 @@ impl SqliteConnPool { let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, - rmd160_hex: ctx.rmd160.to_string(), - shared_db_id: ctx.shared_db_id().to_string(), + default_db_id: ctx.rmd160.to_string(), + shared_db_id: ctx.default_shared_db_id().to_string(), db_root: db_root.map(|d| d.to_owned()) })); @@ -139,15 +139,6 @@ impl SqliteConnPool { f(self.sqlite_conn_impl(db_id, DbIdConnKind::Single).lock().unwrap()) } - /// Run a sql query for shared_db. - pub fn run_sql_query_shared(&self, db_id: Option<&str>, f: F) -> R - where - F: FnOnce(MutexGuard) -> R + Send + 'static, - R: Send + 'static, - { - f(self.sqlite_conn_impl(db_id, DbIdConnKind::Shared).lock().unwrap()) - } - pub fn add_test_db(&self, db_id: String) { let mut connections = self.connections.write().unwrap(); connections.insert(db_id, Arc::new(Mutex::new(Connection::open_in_memory().unwrap()))); @@ -169,14 +160,14 @@ impl SqliteConnPool { .unwrap_or_else(|| self.shared_db_id.to_owned()), DbIdConnKind::Single => db_id .map(|e| e.to_owned()) - .unwrap_or_else(|| self.rmd160_hex.to_owned()), + .unwrap_or_else(|| self.default_db_id.to_owned()), } } fn db_id_from_ctx(ctx: &MmCtx, db_id: Option<&str>, kind: &DbIdConnKind) -> String { match kind { DbIdConnKind::Shared => db_id .map(|e| e.to_owned()) - .unwrap_or_else(|| ctx.shared_db_id().to_string()), + .unwrap_or_else(|| ctx.default_shared_db_id().to_string()), DbIdConnKind::Single => db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()), } } @@ -193,7 +184,7 @@ impl SqliteConnPool { pub struct AsyncSqliteConnPool { connections: Arc>>>>, sqlite_file_path: PathBuf, - rmd160_hex: String, + default_db_id: String, } impl AsyncSqliteConnPool { @@ -215,7 +206,7 @@ impl AsyncSqliteConnPool { try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path, - rmd160_hex: ctx.rmd160.to_string(), + default_db_id: ctx.rmd160.to_string(), })); Ok(()) @@ -241,14 +232,14 @@ impl AsyncSqliteConnPool { try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path: PathBuf::new(), - rmd160_hex: ctx.rmd160.to_string(), + default_db_id: ctx.rmd160.to_string(), })); Ok(()) } /// Retrieve or create a connection. 
pub async fn async_sqlite_conn(&self, db_id: Option<&str>) -> Arc> { - let db_id = db_id.unwrap_or(&self.rmd160_hex); + let db_id = db_id.unwrap_or(&self.default_db_id); let connections = self.connections.read().await; if let Some(connection) = connections.get(db_id) { @@ -283,8 +274,8 @@ impl AsyncSqliteConnPool { } pub struct DbIds { - pub db_id: String, - pub shared_db_id: String, + pub db_id: Option, + pub shared_db_id: Option, } pub type DbMigrationHandler = Arc>>; diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 386595ffab..113fb14ef2 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -28,7 +28,7 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { let default_db_id = ctx.rmd160().to_string(); - let shared_db_id = ctx.shared_db_id().to_string(); + let shared_db_id = ctx.default_shared_db_id().to_string(); let db_id = db_id.unwrap_or(&default_db_id).to_string(); let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); @@ -46,7 +46,7 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc) -> Self { let db_id = hex::encode(ctx.rmd160().as_slice()); - let shared_db_id = ctx.shared_db_id().to_string(); + let shared_db_id = ctx.default_shared_db_id().to_string(); let conns = HashMap::from([(shared_db_id.clone(), Arc::new(AsyncMutex::new(None)))]); ConstructibleDb { locks: Arc::new(RwLock::new(conns)), @@ -60,7 +60,7 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_global_db(ctx: &MmArc) -> Self { let db_id = ctx.rmd160().to_string(); - let shared_db_id = ctx.shared_db_id().to_string(); + let shared_db_id = ctx.default_shared_db_id().to_string(); ConstructibleDb { locks: Arc::new(RwLock::new(HashMap::default())), db_namespace: ctx.db_namespace, diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index a935c160bf..e0b30edf81 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -464,21 +464,26 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { let mut guard = receiver.lock().await; while let Some(ids) = guard.next().await { - if migrations.contains(&ids.db_id) { - debug!("{} migrated, skipping migration..", ids.db_id); - continue; + if let Some(db_id) = &ids.db_id { + if migrations.contains(db_id) { + debug!("{} migrated, skipping migration..", db_id); + continue; + } } // run db migration for db_id if new activated pubkey is unique. - if let Err(err) = run_db_migration_impl(&ctx, Some(&ids.db_id), Some(&ids.shared_db_id)).await { + if let Err(err) = run_db_migration_impl(&ctx, ids.db_id.as_deref(), ids.shared_db_id.as_deref()).await { error!("{err:?}"); continue; }; - // insert new db_id to migration list - migrations.insert(ids.db_id.clone()); + if let Some(db_id) = &ids.db_id { + // insert new db_id to migration list + migrations.insert(db_id.to_owned()); + }; + // Fetch and extend ctx.coins_needed_for_kick_start from new intialized db. 
- if let Err(err) = kick_start(ctx.clone(), Some(&ids.db_id)).await { + if let Err(err) = kick_start(ctx.clone(), ids.db_id.as_deref()).await { error!("{err:?}"); continue; }; @@ -576,7 +581,6 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes } async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { - println!("kick_start: {db_id:?}"); let mut coins_needed_for_kick_start = swap_kick_starts(ctx.clone(), db_id) .await .map_to_mm(MmInitError::SwapsKickStartError)?; diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs index 6d632abdee..333526fcc7 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs @@ -57,7 +57,7 @@ pub struct GetSharedDbIdResponse { } pub async fn get_shared_db_id(ctx: MmArc, _req: Json) -> GetSharedDbIdResult { - let shared_db_id = ctx.shared_db_id().to_owned().into(); + let shared_db_id = ctx.default_shared_db_id().to_owned().into(); Ok(GetSharedDbIdResponse { shared_db_id }) } From 8f19481a222e96a3073c059377023a7ffc72888e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 5 Jul 2024 15:41:37 +0100 Subject: [PATCH 169/186] proper shared_db handling and improvements --- mm2src/coins/eth.rs | 4 ++- mm2src/coins/eth/v2_activation.rs | 36 ++++++------------- .../coins/hd_wallet/storage/mock_storage.rs | 2 +- mm2src/coins/hd_wallet/storage/mod.rs | 6 ++-- .../coins/hd_wallet/storage/sqlite_storage.rs | 6 ++-- .../coins/hd_wallet/storage/wasm_storage.rs | 24 ++++++------- mm2src/coins/lp_coins.rs | 11 ++---- mm2src/coins/my_tx_history_v2.rs | 2 +- mm2src/coins/tendermint/tendermint_coin.rs | 13 +++---- mm2src/coins/utxo/bch.rs | 2 +- mm2src/coins/utxo/qtum.rs | 2 +- .../utxo/utxo_builder/utxo_coin_builder.rs | 30 ++++++---------- mm2src/coins/utxo/utxo_common.rs | 11 +++--- mm2src/coins/utxo/utxo_common_tests.rs | 6 ++-- mm2src/coins/utxo/utxo_standard.rs | 2 +- .../src/platform_coin_with_tokens.rs | 2 +- .../standalone_coin/init_standalone_coin.rs | 2 +- .../src/tendermint_with_assets_activation.rs | 21 ++++------- mm2src/crypto/src/global_hd_ctx.rs | 5 --- mm2src/mm2_core/src/mm_ctx.rs | 4 +-- mm2src/mm2_core/src/sql_connection_pool.rs | 9 ++--- mm2src/mm2_main/src/lp_native_dex.rs | 20 +++++------ 22 files changed, 82 insertions(+), 138 deletions(-) diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index d3779d17be..b4dea2ebb2 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -5715,7 +5715,9 @@ impl MmCoin for EthCoin { async fn account_db_id(&self) -> Option { eth_account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { eth_shared_db_id(self).await.or(self.account_db_id().await) } + async fn shared_db_id(&self, ctx: &MmArc) -> Option { + eth_shared_db_id(self, ctx).await.or(eth_account_db_id(self).await) + } } pub trait TryToAddress { diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 2537ad0e5e..74a9ec1269 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -6,13 +6,10 @@ use crate::nft::nft_errors::{GetNftInfoError, ParseChainTypeError}; use crate::nft::nft_structs::Chain; #[cfg(target_arch = "wasm32")] use crate::EthMetamaskPolicy; use common::executor::AbortedError; -use crypto::shared_db_id::shared_db_id_from_seed; use crypto::{trezor::TrezorError, Bip32Error, CryptoCtxError, HwError}; use enum_derives::EnumFromTrait; #[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; 
use instant::Instant; -#[cfg(not(target_arch = "wasm32"))] -use mm2_core::sql_connection_pool::DbIds; use mm2_err_handle::common_errors::WithInternal; #[cfg(target_arch = "wasm32")] use mm2_metamask::{from_metamask_error, MetamaskError, MetamaskRpcError, WithMetamaskRpcError}; @@ -664,16 +661,12 @@ pub(crate) async fn build_address_and_priv_key_policy( .mm_err(|e| EthActivationV2Error::InternalError(e.to_string()))?; let activated_key = KeyPair::from_secret_slice(raw_priv_key.as_slice()) .map_to_mm(|e| EthActivationV2Error::InternalError(e.to_string()))?; - let hd_wallet_rmd160 = global_hd_ctx.derive_rmd160(); - #[cfg(not(target_arch = "wasm32"))] - { - let db_id = dhash160(activated_key.public().as_bytes()); - run_db_migration_for_new_eth_pubkey(ctx, Some(db_id.to_string()), Some(hd_wallet_rmd160.to_string())) - .await?; - } - let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); + #[cfg(not(target_arch = "wasm32"))] + run_db_migration_for_new_eth_pubkey(ctx, dhash160(activated_key.public().as_bytes()).to_string()).await?; + + let hd_wallet_rmd160 = *ctx.rmd160(); let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) .await .mm_err(EthActivationV2Error::from)?; @@ -930,13 +923,8 @@ fn compress_public_key(uncompressed: H520) -> MmResult, - shared_db_id: Option, -) -> MmResult<(), EthActivationV2Error> { +async fn run_db_migration_for_new_eth_pubkey(ctx: &MmArc, db_id: String) -> MmResult<(), EthActivationV2Error> { info!("Public key hash: {db_id:?}"); - info!("Shared Database ID: {shared_db_id:?}"); let db_migration_sender = ctx .db_migration_watcher @@ -945,20 +933,18 @@ async fn run_db_migration_for_new_eth_pubkey( .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender - .send(DbIds { db_id, shared_db_id }) + .send(db_id) .await .map_to_mm(|err| EthActivationV2Error::InternalError(err.to_string()))?; Ok(()) } -pub(super) async fn eth_shared_db_id(coin: &EthCoin) -> Option { - // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address - coin.derivation_method().hd_wallet().and_then(|hd| { - shared_db_id_from_seed(&hex::encode(hd.hd_wallet_rmd160.as_slice())) - .ok() - .map(|id| hex::encode(id.as_slice())) - }) +pub(super) async fn eth_shared_db_id(coin: &EthCoin, ctx: &MmArc) -> Option { + // Use the hd_wallet_rmd160 as the db_id in HD mode since it's unique to a device and not tied to a single address + coin.derivation_method() + .hd_wallet() + .map(|_| ctx.default_shared_db_id().to_string()) } pub(super) async fn eth_account_db_id(coin: &EthCoin) -> Option { diff --git a/mm2src/coins/hd_wallet/storage/mock_storage.rs b/mm2src/coins/hd_wallet/storage/mock_storage.rs index eaf686fc90..8086e58be8 100644 --- a/mm2src/coins/hd_wallet/storage/mock_storage.rs +++ b/mm2src/coins/hd_wallet/storage/mock_storage.rs @@ -8,7 +8,7 @@ pub(crate) struct HDWalletMockStorage; #[async_trait] #[cfg_attr(test, mockable)] impl HDWalletStorageInternalOps for HDWalletMockStorage { - async fn init(_ctx: &MmArc, _db_id: Option<&str>) -> HDWalletStorageResult + async fn init(_ctx: &MmArc) -> HDWalletStorageResult where Self: Sized, { diff --git a/mm2src/coins/hd_wallet/storage/mod.rs b/mm2src/coins/hd_wallet/storage/mod.rs index 77597c248f..fe3d3c9fa6 100644 --- a/mm2src/coins/hd_wallet/storage/mod.rs +++ b/mm2src/coins/hd_wallet/storage/mod.rs @@ -85,7 +85,7 @@ pub struct HDAccountStorageItem { #[async_trait] #[cfg_attr(test, mockable)] pub(crate) trait 
HDWalletStorageInternalOps { - async fn init(ctx: &MmArc, db_id: Option<&str>) -> HDWalletStorageResult + async fn init(ctx: &MmArc) -> HDWalletStorageResult where Self: Sized; @@ -225,7 +225,7 @@ impl HDWalletCoinStorage { let hd_wallet_rmd160 = crypto_ctx .hw_wallet_rmd160() .or_mm_err(|| HDWalletStorageError::HDWalletUnavailable)?; - let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); + let inner = Box::new(HDWalletStorageInstance::init(ctx).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, @@ -238,7 +238,7 @@ impl HDWalletCoinStorage { coin: String, hd_wallet_rmd160: H160, ) -> HDWalletStorageResult { - let inner = Box::new(HDWalletStorageInstance::init(ctx, None).await?); + let inner = Box::new(HDWalletStorageInstance::init(ctx).await?); Ok(HDWalletCoinStorage { coin, hd_wallet_rmd160, diff --git a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs index 911b0f9518..274e9255b7 100644 --- a/mm2src/coins/hd_wallet/storage/sqlite_storage.rs +++ b/mm2src/coins/hd_wallet/storage/sqlite_storage.rs @@ -97,11 +97,11 @@ pub(super) struct HDWalletSqliteStorage { #[async_trait] impl HDWalletStorageInternalOps for HDWalletSqliteStorage { - async fn init(ctx: &MmArc, db_id: Option<&str>) -> HDWalletStorageResult + async fn init(ctx: &MmArc) -> HDWalletStorageResult where Self: Sized, { - let shared = ctx.shared_sqlite_conn_opt(db_id).or_mm_err(|| { + let shared = ctx.shared_sqlite_conn_opt().or_mm_err(|| { HDWalletStorageError::Internal("'MmCtx::shared_sqlite_conn' is not initialized".to_owned()) })?; let storage = HDWalletSqliteStorage { @@ -279,7 +279,7 @@ pub(crate) async fn get_all_storage_items(ctx: &MmArc) -> Vec, - db_id: Option, } #[async_trait] impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { - async fn init(ctx: &MmArc, db_id: Option<&str>) -> HDWalletStorageResult + async fn init(ctx: &MmArc) -> HDWalletStorageResult where Self: Sized, { let coins_ctx = CoinsContext::from_ctx(ctx).map_to_mm(HDWalletStorageError::Internal)?; let db = SharedDb::downgrade(&coins_ctx.hd_wallet_db); - Ok(HDWalletIndexedDbStorage { - db, - db_id: db_id.map(String::from), - }) + Ok(HDWalletIndexedDbStorage { db }) } async fn load_accounts(&self, wallet_id: HDWalletId) -> HDWalletStorageResult> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; + let locked_db = Self::lock_db_mutex(&shared_db).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -197,7 +193,7 @@ impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { account_id: u32, ) -> HDWalletStorageResult> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; + let locked_db = Self::lock_db_mutex(&shared_db).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -239,7 +235,7 @@ impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { account: HDAccountStorageItem, ) -> HDWalletStorageResult<()> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; + let locked_db = Self::lock_db_mutex(&shared_db).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -254,7 +250,7 @@ impl HDWalletStorageInternalOps for HDWalletIndexedDbStorage { async fn clear_accounts(&self, wallet_id: HDWalletId) -> 
HDWalletStorageResult<()> { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; + let locked_db = Self::lock_db_mutex(&shared_db).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -274,8 +270,8 @@ impl HDWalletIndexedDbStorage { .or_mm_err(|| HDWalletStorageError::Internal("'HDWalletIndexedDbStorage::db' doesn't exist".to_owned())) } - async fn lock_db_mutex(db: &SharedDb, db_id: Option<&str>) -> HDWalletStorageResult { - db.get_or_initialize_shared(db_id) + async fn lock_db_mutex(db: &SharedDb) -> HDWalletStorageResult { + db.get_or_initialize_shared(None) .await .mm_err(HDWalletStorageError::from) } @@ -300,7 +296,7 @@ impl HDWalletIndexedDbStorage { F: FnOnce(&mut HDAccountTable), { let shared_db = self.get_shared_db()?; - let locked_db = Self::lock_db_mutex(&shared_db, self.db_id.as_deref()).await?; + let locked_db = Self::lock_db_mutex(&shared_db).await?; let transaction = locked_db.inner.transaction().await?; let table = transaction.table::().await?; @@ -323,7 +319,7 @@ impl HDWalletIndexedDbStorage { #[cfg(any(test, target_arch = "wasm32"))] pub(super) async fn get_all_storage_items(ctx: &MmArc) -> Vec { let coins_ctx = CoinsContext::from_ctx(ctx).unwrap(); - let db = coins_ctx.hd_wallet_db.get_or_initialize(None).await.unwrap(); + let db = coins_ctx.hd_wallet_db.get_or_initialize_shared(None).await.unwrap(); let transaction = db.inner.transaction().await.unwrap(); let table = transaction.table::().await.unwrap(); table diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 826cb38820..6b46bfeb0e 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -285,7 +285,6 @@ use hd_wallet::{AccountUpdatingError, AddressDerivingError, HDAccountOps, HDAddr HDCoinHDAccount, HDExtractPubkeyError, HDPathAccountToAddressId, HDWalletAddress, HDWalletCoinOps, HDWalletOps, HDWithdrawError, HDXPubExtractor, WithdrawFrom, WithdrawSenderAddress}; use nft::nft_errors::GetNftInfoError; -use primitives::hash::H160; use qrc20::{qrc20_coin_with_policy, Qrc20ActivationParams, Qrc20Coin, Qrc20FeeDetails}; use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskManagerShared}, init_account_balance::{AccountBalanceTaskManager, AccountBalanceTaskManagerShared}, @@ -3237,13 +3236,9 @@ pub trait MmCoin: /// If the coin is not derived from an HD wallet, it returns `None`. async fn account_db_id(&self) -> Option { None } - // Retrieves a unique identifier for the account that is shared across different contexts, - /// such as different derivation methods (HD wallet vs. non-HD wallet) - async fn shared_db_id(&self) -> Option { None } - - /// In normal wallet mode, this function returns the regular `db_id`, which is the RMD160 hash of the public key. - /// In HD wallet mode, it returns `hd_wallet_rmd160`, which is the RMD160 hash unique to the HD wallet/device. - async fn tx_history_db_id(&self) -> Option { None } + // Retrieves db_id for derivation methods (HD wallet vs. 
non-HD wallet) + // NOTE: this function only needs special handling for coins that supports HD wallet + async fn shared_db_id(&self, _ctx: &MmArc) -> Option { None } /// Path to tx history file #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/coins/my_tx_history_v2.rs b/mm2src/coins/my_tx_history_v2.rs index e3806c7621..6954b81f23 100644 --- a/mm2src/coins/my_tx_history_v2.rs +++ b/mm2src/coins/my_tx_history_v2.rs @@ -402,7 +402,7 @@ pub(crate) async fn my_tx_history_v2_impl( where Coin: CoinWithTxHistoryV2 + MmCoin, { - let tx_history_storage = TxHistoryStorageBuilder::new(&ctx, coin.tx_history_db_id().await).build()?; + let tx_history_storage = TxHistoryStorageBuilder::new(&ctx, coin.shared_db_id(&ctx).await).build()?; let wallet_id = coin.history_wallet_id(); let is_storage_init = tx_history_storage.is_initialized_for(&wallet_id).await?; diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 26a74c62e6..1f2a93612e 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2424,12 +2424,13 @@ impl MmCoin for TendermintCoin { None } - async fn tx_history_db_id(&self) -> Option { - self.activation_policy - .public_key() - .ok() - .map(|k| hex::encode(dhash160(&k.to_bytes()))) - .or(self.account_db_id().await) // Fallback to the account db_id for non-HD wallets + async fn shared_db_id(&self, ctx: &MmArc) -> Option { + if let TendermintActivationPolicy::PrivateKey(PrivKeyPolicy::HDWallet { .. }) = self.activation_policy { + return Some(ctx.default_shared_db_id().to_string()); + }; + + // Fallback to the account db_id for non-HD wallets + self.account_db_id().await } } diff --git a/mm2src/coins/utxo/bch.rs b/mm2src/coins/utxo/bch.rs index acf18a5cde..7e1c6acdaa 100644 --- a/mm2src/coins/utxo/bch.rs +++ b/mm2src/coins/utxo/bch.rs @@ -1361,7 +1361,7 @@ impl MmCoin for BchCoin { async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self).await } + async fn shared_db_id(&self, _ctx: &MmArc) -> Option { utxo_common::shared_db_id(self).await } } #[async_trait] diff --git a/mm2src/coins/utxo/qtum.rs b/mm2src/coins/utxo/qtum.rs index e34f9f522f..d4a0a843ad 100644 --- a/mm2src/coins/utxo/qtum.rs +++ b/mm2src/coins/utxo/qtum.rs @@ -982,7 +982,7 @@ impl MmCoin for QtumCoin { async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self).await } + async fn shared_db_id(&self, _ctx: &MmArc) -> Option { utxo_common::shared_db_id(self).await } } #[async_trait] diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 27a51331b1..a3f88b9339 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -219,8 +219,15 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { bip39_secp_priv_key: global_hd_ctx.root_priv_key().clone(), }; + #[cfg(not(target_arch = "wasm32"))] + { + // db_id should be the current activated key for this hd wallet + run_db_migration_for_new_utxo_pubkey(self.ctx(), activated_key_pair.public().address_hash().to_string()) + .await? 
+ } + let address_format = self.address_format()?; - let hd_wallet_rmd160 = global_hd_ctx.derive_rmd160(); + let hd_wallet_rmd160 = *self.ctx().rmd160(); let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(self.ctx(), self.ticker().to_owned(), hd_wallet_rmd160).await?; let accounts = load_hd_accounts_from_storage(&hd_wallet_storage, path_to_coin) @@ -238,14 +245,6 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { }, address_format, }; - #[cfg(not(target_arch = "wasm32"))] - { - // db_id should be the current activated key for this hd wallet - let db_id = Some(activated_key_pair.public().address_hash()); - // device_rmd_160 is unqiue to a device, hence it can bs used as shared_db_id. - let shared_db_id = Some(hd_wallet_rmd160); - run_db_migration_for_new_utxo_pubkey(self.ctx(), db_id, shared_db_id).await? - } let derivation_method = DerivationMethod::HDWallet(hd_wallet); build_utxo_coin_fields_with_conf_and_policy(self, conf, priv_key_policy, derivation_method).await } @@ -1022,17 +1021,8 @@ async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Resul } #[cfg(not(target_arch = "wasm32"))] -pub async fn run_db_migration_for_new_utxo_pubkey( - ctx: &MmArc, - db_id: Option, - shared_db_id: Option, -) -> MmResult<(), UtxoCoinBuildError> { - use mm2_core::sql_connection_pool::DbIds; - - let db_id = db_id.map(|id| id.to_string()); - let shared_db_id = shared_db_id.map(|id| id.to_string()); +pub async fn run_db_migration_for_new_utxo_pubkey(ctx: &MmArc, db_id: String) -> MmResult<(), UtxoCoinBuildError> { info!("Public key hash: {db_id:?}"); - info!("Shared Database ID: {shared_db_id:?}"); let db_migration_sender = ctx .db_migration_watcher @@ -1041,7 +1031,7 @@ pub async fn run_db_migration_for_new_utxo_pubkey( .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender - .send(DbIds { db_id, shared_db_id }) + .send(db_id) .await .map_to_mm(|err| UtxoCoinBuildError::Internal(err.to_string()))?; diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index 9d5e84dcd1..0c6b015210 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -5147,13 +5147,10 @@ where { if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { // we can use hd_wallet_rmd160 as our shared_db_id since it's unique to a device - let db_id = hd_wallet + return hd_wallet .get_enabled_address() .await .map(|addr| hex::encode(addr.pubkey().address_hash().as_slice())); - println!("enable_address: {:?}", db_id); - - return db_id; } None @@ -5161,15 +5158,15 @@ where /// In normal wallet mode, this function returns the regular `db_id`, which is the RMD160 hash of the public key. /// In HD wallet mode, it returns `hd_wallet_rmd160`, which is the RMD160 hash unique to the HD wallet/device. -pub async fn tx_history_db_id(coin: &Coin) -> Option +pub async fn shared_db_id(coin: &Coin) -> Option where Coin: CoinWithDerivationMethod + HDWalletCoinOps + HDCoinWithdrawOps + UtxoCommonOps, { // Use the hd_wallet_rmd160 as the db_id since it's unique to a device and not tied to a single address + // Fallback to the account db_id for non-HD wallets. 
coin.derivation_method() .hd_wallet() - .map(|hd| hex::encode(hd.inner.hd_wallet_rmd160.as_slice())) - .or(account_db_id(coin).await) // Fallback to the account db_id for non-HD wallets + .map(|hd| hd.inner.hd_wallet_rmd160.to_string()) } #[test] diff --git a/mm2src/coins/utxo/utxo_common_tests.rs b/mm2src/coins/utxo/utxo_common_tests.rs index 3e834005e3..0bc1f920e1 100644 --- a/mm2src/coins/utxo/utxo_common_tests.rs +++ b/mm2src/coins/utxo/utxo_common_tests.rs @@ -288,10 +288,10 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { #[cfg(not(target_arch = "wasm32"))] { let dbs = ctx.sqlite_conn_pool.as_option().unwrap(); - dbs.add_test_db(coin.tx_history_db_id().await.unwrap()); + dbs.add_test_db(coin.shared_db_id(&ctx).await.unwrap()); } let current_balances = coin.my_addresses_balances().await.unwrap(); - let storage = TxHistoryStorageBuilder::new(&ctx, coin.tx_history_db_id().await) + let storage = TxHistoryStorageBuilder::new(&ctx, coin.shared_db_id(&ctx).await) .build() .unwrap(); spawn(utxo_history_loop( @@ -318,7 +318,7 @@ pub(super) async fn test_hd_utxo_tx_history_impl(rpc_client: ElectrumClient) { _ => unimplemented!(), } - let storage = TxHistoryStorageBuilder::new(&ctx, coin.tx_history_db_id().await) + let storage = TxHistoryStorageBuilder::new(&ctx, coin.shared_db_id(&ctx).await) .build() .unwrap(); spawn(utxo_history_loop( diff --git a/mm2src/coins/utxo/utxo_standard.rs b/mm2src/coins/utxo/utxo_standard.rs index c113fed05b..f1c11b5ec0 100644 --- a/mm2src/coins/utxo/utxo_standard.rs +++ b/mm2src/coins/utxo/utxo_standard.rs @@ -1009,7 +1009,7 @@ impl MmCoin for UtxoStandardCoin { async fn account_db_id(&self) -> Option { utxo_common::account_db_id(self).await } - async fn tx_history_db_id(&self) -> Option { utxo_common::tx_history_db_id(self).await } + async fn shared_db_id(&self, _ctx: &MmArc) -> Option { utxo_common::shared_db_id(self).await } } #[async_trait] diff --git a/mm2src/coins_activation/src/platform_coin_with_tokens.rs b/mm2src/coins_activation/src/platform_coin_with_tokens.rs index 3319381cc2..5692d86376 100644 --- a/mm2src/coins_activation/src/platform_coin_with_tokens.rs +++ b/mm2src/coins_activation/src/platform_coin_with_tokens.rs @@ -476,7 +476,7 @@ where if req.request.tx_history() { platform_coin.start_history_background_fetching( ctx.clone(), - TxHistoryStorageBuilder::new(&ctx, platform_coin.clone().into().tx_history_db_id().await).build()?, + TxHistoryStorageBuilder::new(&ctx, platform_coin.clone().into().shared_db_id(&ctx).await).build()?, activation_result.get_platform_balance(), ); } diff --git a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs index 40f8f8475d..8adc178adb 100644 --- a/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs +++ b/mm2src/coins_activation/src/standalone_coin/init_standalone_coin.rs @@ -224,7 +224,7 @@ where coin.start_history_background_fetching( self.ctx.metrics.clone(), - TxHistoryStorageBuilder::new(&self.ctx, coin_clone.tx_history_db_id().await).build()?, + TxHistoryStorageBuilder::new(&self.ctx, coin_clone.shared_db_id(&self.ctx).await).build()?, current_balances, ); } diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index bcce46b126..42e0584ff4 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ 
-22,8 +22,6 @@ use common::executor::{AbortSettings, SpawnAbortable}; use common::{true_f, Future01CompatExt}; #[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use mm2_core::mm_ctx::MmArc; -#[cfg(not(target_arch = "wasm32"))] -use mm2_core::sql_connection_pool::DbIds; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; use mm2_event_stream::EventStreamConfiguration; @@ -251,10 +249,8 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { }); } #[cfg(not(target_arch = "wasm32"))] - { - let db_id = dhash160(&pubkey.to_bytes()).to_string(); - run_db_migration_for_new_tendermint_pubkey(&ctx, Some(db_id), None, ticker.clone()).await?; - } + run_db_migration_for_new_tendermint_pubkey(&ctx, dhash160(&pubkey.to_bytes()).to_string(), ticker.clone()) + .await?; TendermintActivationPolicy::with_public_key(pubkey) } else { @@ -268,18 +264,15 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { tendermint_priv_key_policy(&conf, &ticker, &private_key_policy, activation_request.path_to_address)?; #[cfg(not(target_arch = "wasm32"))] - if let PrivKeyBuildPolicy::GlobalHDAccount(_hd) = &private_key_policy { + if let PrivKeyBuildPolicy::GlobalHDAccount(_) = &private_key_policy { let result = TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy.clone()); let pubkey = result.public_key().map_to_mm(|e| TendermintInitError { ticker: ticker.clone(), kind: TendermintInitErrorKind::Internal(e.to_string()), })?; - let db_id = dhash160(&pubkey.to_bytes()); - let shared_db_id = _hd.derive_rmd160(); run_db_migration_for_new_tendermint_pubkey( &ctx, - Some(db_id.to_string()), - Some(shared_db_id.to_string()), + dhash160(&pubkey.to_bytes()).to_string(), ticker.clone(), ) .await?; @@ -419,12 +412,10 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { #[cfg(not(target_arch = "wasm32"))] async fn run_db_migration_for_new_tendermint_pubkey( ctx: &MmArc, - db_id: Option, - shared_db_id: Option, + db_id: String, ticker: String, ) -> MmResult<(), TendermintInitError> { info!("Public key hash: {db_id:?}"); - info!("Shared Database ID: {shared_db_id:?}"); let db_migration_sender = ctx .db_migration_watcher @@ -433,7 +424,7 @@ async fn run_db_migration_for_new_tendermint_pubkey( .get_sender(); let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender - .send(DbIds { db_id, shared_db_id }) + .send(db_id) .await .map_to_mm(|err| TendermintInitError { ticker: ticker.to_string(), diff --git a/mm2src/crypto/src/global_hd_ctx.rs b/mm2src/crypto/src/global_hd_ctx.rs index 02787b21ff..ad7c2bc63b 100644 --- a/mm2src/crypto/src/global_hd_ctx.rs +++ b/mm2src/crypto/src/global_hd_ctx.rs @@ -1,11 +1,9 @@ use crate::privkey::{bip39_seed_from_passphrase, key_pair_from_secret, PrivKeyError}; use crate::{mm2_internal_der_path, Bip32Error, CryptoInitError, CryptoInitResult}; use bip32::{DerivationPath, ExtendedPrivateKey}; -use bitcrypto::dhash160; use common::drop_mutability; use keys::{KeyPair, Secret as Secp256k1Secret}; use mm2_err_handle::prelude::*; -use primitives::hash::{H160, H256}; use std::ops::Deref; use std::sync::Arc; use zeroize::{Zeroize, ZeroizeOnDrop}; @@ -76,9 +74,6 @@ impl GlobalHDAccountCtx { pub fn derive_secp256k1_secret(&self, derivation_path: &DerivationPath) -> MmResult { derive_secp256k1_secret(self.bip39_secp_priv_key.clone(), derivation_path) } - - /// Derives a unique identifier (RMD160 hash of the root public key) for the device. 
- pub fn derive_rmd160(&self) -> H160 { dhash160(H256::from(self.root_seed_bytes()).as_slice()) } } pub fn derive_secp256k1_secret( diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index d32aa2cd94..ecc79f7d66 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -358,11 +358,11 @@ impl MmCtx { /// Retrieves an optional shared connection from the pool for the specified database ID. /// Returns `None` if the connection pool is not initialized. #[cfg(not(target_arch = "wasm32"))] - pub fn shared_sqlite_conn_opt(&self, db_id: Option<&str>) -> Option>> { + pub fn shared_sqlite_conn_opt(&self) -> Option>> { self.sqlite_conn_pool .as_option() .cloned() - .map(|pool| pool.sqlite_conn_shared(db_id)) + .map(|pool| pool.sqlite_conn_shared(Some(&self.default_db_id()))) } /// Retrieves an optional connection from the pool for the specified database ID. diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 3cf62445b1..86dcfd94bd 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -273,13 +273,8 @@ impl AsyncSqliteConnPool { } } -pub struct DbIds { - pub db_id: Option, - pub shared_db_id: Option, -} - -pub type DbMigrationHandler = Arc>>; -pub type DbMigrationSender = Arc>>; +pub type DbMigrationHandler = Arc>>; +pub type DbMigrationSender = Arc>>; pub struct DbMigrationWatcher { sender: DbMigrationSender, diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index e0b30edf81..b59d321821 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -463,27 +463,23 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { .expect("db_m igration_watcher initialization failed"); let mut guard = receiver.lock().await; - while let Some(ids) = guard.next().await { - if let Some(db_id) = &ids.db_id { - if migrations.contains(db_id) { - debug!("{} migrated, skipping migration..", db_id); - continue; - } + while let Some(db_id) = guard.next().await { + if migrations.contains(&db_id) { + debug!("{} migrated, skipping migration..", db_id); + continue; } // run db migration for db_id if new activated pubkey is unique. - if let Err(err) = run_db_migration_impl(&ctx, ids.db_id.as_deref(), ids.shared_db_id.as_deref()).await { + if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), None).await { error!("{err:?}"); continue; }; - if let Some(db_id) = &ids.db_id { - // insert new db_id to migration list - migrations.insert(db_id.to_owned()); - }; + // insert new db_id to migration list + migrations.insert(db_id.to_owned()); // Fetch and extend ctx.coins_needed_for_kick_start from new intialized db. 
- if let Err(err) = kick_start(ctx.clone(), ids.db_id.as_deref()).await { + if let Err(err) = kick_start(ctx.clone(), Some(&db_id)).await { error!("{err:?}"); continue; }; From a78704dec81b487d820ddab5f61d1b0e7e86154e Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 9 Jul 2024 14:57:09 +0100 Subject: [PATCH 170/186] fix review notes --- mm2src/coins/eth/v2_activation.rs | 3 +-- .../utxo/utxo_builder/utxo_coin_builder.rs | 3 +-- .../src/tendermint_with_assets_activation.rs | 4 +-- mm2src/mm2_core/src/sql_connection_pool.rs | 26 ++++++++++++++----- mm2src/mm2_main/src/lp_native_dex.rs | 5 ++-- 5 files changed, 25 insertions(+), 16 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 74a9ec1269..41e84a311f 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -926,12 +926,11 @@ fn compress_public_key(uncompressed: H520) -> MmResult MmResult<(), EthActivationV2Error> { info!("Public key hash: {db_id:?}"); - let db_migration_sender = ctx + let mut db_migration_sender = ctx .db_migration_watcher .as_option() .expect("Db migration watcher isn't intialized yet!") .get_sender(); - let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender .send(db_id) .await diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index a3f88b9339..ccf6f27a47 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -1024,12 +1024,11 @@ async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Resul pub async fn run_db_migration_for_new_utxo_pubkey(ctx: &MmArc, db_id: String) -> MmResult<(), UtxoCoinBuildError> { info!("Public key hash: {db_id:?}"); - let db_migration_sender = ctx + let mut db_migration_sender = ctx .db_migration_watcher .as_option() .expect("Db migration watcher isn't intialized yet!") .get_sender(); - let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender .send(db_id) .await diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index 42e0584ff4..17e4687bf4 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -416,13 +416,11 @@ async fn run_db_migration_for_new_tendermint_pubkey( ticker: String, ) -> MmResult<(), TendermintInitError> { info!("Public key hash: {db_id:?}"); - - let db_migration_sender = ctx + let mut db_migration_sender = ctx .db_migration_watcher .as_option() .expect("Db migration watcher isn't intialized yet!") .get_sender(); - let mut db_migration_sender = db_migration_sender.lock().await; db_migration_sender .send(db_id) .await diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 86dcfd94bd..4c8759ba91 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -52,6 +52,11 @@ impl SqliteConnPool { // Connection pool is already initialized, insert new connection. 
if let Some(pool) = ctx.sqlite_conn_pool.as_option() { + let conns = pool.connections.read().await; + if conns.get(&db_id).is_some() { + return Ok(()); + } + let conn = Self::open_connection(sqlite_file_path); let mut pool = pool.connections.write().unwrap(); pool.insert(db_id, conn); @@ -193,6 +198,11 @@ impl AsyncSqliteConnPool { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { + let conns = pool.connections.read().await; + if conns.get(&db_id).is_some() { + return Ok(()); + } + let conn = Self::open_connection(&pool.sqlite_file_path).await; let mut pool = pool.connections.write().await; pool.insert(db_id, conn); @@ -217,6 +227,12 @@ impl AsyncSqliteConnPool { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { + let conns = pool.connections.read().await; + if conns.get(&db_id).is_some() { + return Ok(()); + } + drop(conns); + let mut pool = pool.connections.write().await; let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); pool.insert(db_id, conn); @@ -273,8 +289,8 @@ impl AsyncSqliteConnPool { } } -pub type DbMigrationHandler = Arc>>; -pub type DbMigrationSender = Arc>>; +pub type DbMigrationHandler = Receiver; +pub type DbMigrationSender = Sender; pub struct DbMigrationWatcher { sender: DbMigrationSender, @@ -284,12 +300,10 @@ impl DbMigrationWatcher { pub fn init(ctx: &MmCtx) -> Result { let (sender, receiver) = channel(1); - let selfi = Arc::new(Self { - sender: Arc::new(AsyncMutex::new(sender)), - }); + let selfi = Arc::new(Self { sender }); try_s!(ctx.db_migration_watcher.pin(selfi)); - Ok(Arc::new(AsyncMutex::new(receiver))) + Ok(receiver) } pub fn get_sender(&self) -> DbMigrationSender { self.sender.clone() } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index b59d321821..959ab37f81 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -458,12 +458,11 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { use std::collections::HashSet; let mut migrations = HashSet::new(); - let receiver = &ctx + let mut receiver = ctx .init_db_migration_watcher() .expect("db_m igration_watcher initialization failed"); - let mut guard = receiver.lock().await; - while let Some(db_id) = guard.next().await { + while let Some(db_id) = receiver.next().await { if migrations.contains(&db_id) { debug!("{} migrated, skipping migration..", db_id); continue; From bdd79af74c8b5b34a0661454383bd65cb51119fc Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 9 Jul 2024 15:06:52 +0100 Subject: [PATCH 171/186] minor fix --- mm2src/mm2_core/src/sql_connection_pool.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 4c8759ba91..6904c294ed 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -52,10 +52,11 @@ impl SqliteConnPool { // Connection pool is already initialized, insert new connection. 
if let Some(pool) = ctx.sqlite_conn_pool.as_option() { - let conns = pool.connections.read().await; + let conns = pool.connections.read().unwrap(); if conns.get(&db_id).is_some() { return Ok(()); } + drop(conns); let conn = Self::open_connection(sqlite_file_path); let mut pool = pool.connections.write().unwrap(); From 5470b257f967dccb3a3b041f9f9f0e0b8765c82a Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 19 Jul 2024 12:14:38 +0100 Subject: [PATCH 172/186] fix review notes --- mm2src/coins/eth/v2_activation.rs | 38 ++++------- .../utxo/utxo_builder/utxo_coin_builder.rs | 26 ++------ .../src/tendermint_with_assets_activation.rs | 66 ++++++++----------- mm2src/mm2_core/src/sql_connection_pool.rs | 16 ++++- mm2src/mm2_db/src/indexed_db/db_lock.rs | 14 +++- mm2src/mm2_main/src/lp_wallet.rs | 3 +- 6 files changed, 72 insertions(+), 91 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 41e84a311f..c713d2bc58 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -8,8 +8,9 @@ use crate::nft::nft_structs::Chain; use common::executor::AbortedError; use crypto::{trezor::TrezorError, Bip32Error, CryptoCtxError, HwError}; use enum_derives::EnumFromTrait; -#[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use instant::Instant; +#[cfg(not(target_arch = "wasm32"))] +use mm2_core::sql_connection_pool::run_db_migration_for_new_pubkey; use mm2_err_handle::common_errors::WithInternal; #[cfg(target_arch = "wasm32")] use mm2_metamask::{from_metamask_error, MetamaskError, MetamaskRpcError, WithMetamaskRpcError}; @@ -664,7 +665,12 @@ pub(crate) async fn build_address_and_priv_key_policy( let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); #[cfg(not(target_arch = "wasm32"))] - run_db_migration_for_new_eth_pubkey(ctx, dhash160(activated_key.public().as_bytes()).to_string()).await?; + { + let pubkey = dhash160(activated_key.public().as_bytes()).to_string(); + run_db_migration_for_new_pubkey(ctx, pubkey) + .await + .map_to_mm(EthActivationV2Error::InternalError)?; + } let hd_wallet_rmd160 = *ctx.rmd160(); let hd_wallet_storage = HDWalletCoinStorage::init_with_rmd160(ctx, ticker.to_string(), hd_wallet_rmd160) @@ -922,37 +928,19 @@ fn compress_public_key(uncompressed: H520) -> MmResult MmResult<(), EthActivationV2Error> { - info!("Public key hash: {db_id:?}"); - - let mut db_migration_sender = ctx - .db_migration_watcher - .as_option() - .expect("Db migration watcher isn't intialized yet!") - .get_sender(); - db_migration_sender - .send(db_id) - .await - .map_to_mm(|err| EthActivationV2Error::InternalError(err.to_string()))?; - - Ok(()) -} - pub(super) async fn eth_shared_db_id(coin: &EthCoin, ctx: &MmArc) -> Option { - // Use the hd_wallet_rmd160 as the db_id in HD mode since it's unique to a device and not tied to a single address + // Use the hd_wallet_rmd160 as the db_id in HD mode only since it's unique to a device and not tied to a single address coin.derivation_method() .hd_wallet() .map(|_| ctx.default_shared_db_id().to_string()) } pub(super) async fn eth_account_db_id(coin: &EthCoin) -> Option { - if let Some(hd_wallet) = coin.derivation_method().hd_wallet() { - return hd_wallet + match coin.derivation_method() { + DerivationMethod::HDWallet(hd_wallet) => hd_wallet .get_enabled_address() .await - .map(|addr| hex::encode(dhash160(addr.pubkey().as_bytes()))); + .map(|addr| dhash160(addr.pubkey().as_bytes()).to_string()), + _ => None, } - - None } diff --git 
a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index ccf6f27a47..68b0304b6f 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -24,12 +24,13 @@ use derive_more::Display; use futures::channel::mpsc::{channel, unbounded, Receiver as AsyncReceiver, UnboundedReceiver, UnboundedSender}; use futures::compat::Future01CompatExt; use futures::lock::Mutex as AsyncMutex; -#[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use futures::StreamExt; use keys::bytes::Bytes; pub use keys::{Address, AddressBuilder, AddressFormat as UtxoAddressFormat, AddressHashEnum, AddressScriptType, KeyPair, Private, Public, Secret}; use mm2_core::mm_ctx::MmArc; +#[cfg(not(target_arch = "wasm32"))] +use mm2_core::sql_connection_pool::run_db_migration_for_new_pubkey; use mm2_err_handle::prelude::*; use primitives::hash::H160; use rand::seq::SliceRandom; @@ -222,8 +223,10 @@ pub trait UtxoFieldsWithGlobalHDBuilder: UtxoCoinBuilderCommonOps { #[cfg(not(target_arch = "wasm32"))] { // db_id should be the current activated key for this hd wallet - run_db_migration_for_new_utxo_pubkey(self.ctx(), activated_key_pair.public().address_hash().to_string()) - .await? + let pubkey = activated_key_pair.public().address_hash().to_string(); + run_db_migration_for_new_pubkey(self.ctx(), pubkey) + .await + .map_to_mm(UtxoCoinBuildError::Internal)? } let address_format = self.address_format()?; @@ -1019,20 +1022,3 @@ async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Resul // Flatten `Result< Result<(), String>, String >` .flatten() } - -#[cfg(not(target_arch = "wasm32"))] -pub async fn run_db_migration_for_new_utxo_pubkey(ctx: &MmArc, db_id: String) -> MmResult<(), UtxoCoinBuildError> { - info!("Public key hash: {db_id:?}"); - - let mut db_migration_sender = ctx - .db_migration_watcher - .as_option() - .expect("Db migration watcher isn't intialized yet!") - .get_sender(); - db_migration_sender - .send(db_id) - .await - .map_to_mm(|err| UtxoCoinBuildError::Internal(err.to_string()))?; - - Ok(()) -} diff --git a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs index 17e4687bf4..3718a6242b 100644 --- a/mm2src/coins_activation/src/tendermint_with_assets_activation.rs +++ b/mm2src/coins_activation/src/tendermint_with_assets_activation.rs @@ -18,10 +18,10 @@ use coins::tendermint::{tendermint_priv_key_policy, TendermintActivationPolicy, #[cfg(not(target_arch = "wasm32"))] use coins::utxo::dhash160; use coins::{CoinBalance, CoinProtocol, MarketCoinOps, MmCoin, MmCoinEnum, PrivKeyBuildPolicy}; use common::executor::{AbortSettings, SpawnAbortable}; -#[cfg(not(target_arch = "wasm32"))] use common::log::info; use common::{true_f, Future01CompatExt}; -#[cfg(not(target_arch = "wasm32"))] use futures::SinkExt; use mm2_core::mm_ctx::MmArc; +#[cfg(not(target_arch = "wasm32"))] +use mm2_core::sql_connection_pool::run_db_migration_for_new_pubkey; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; use mm2_event_stream::EventStreamConfiguration; @@ -249,8 +249,14 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { }); } #[cfg(not(target_arch = "wasm32"))] - run_db_migration_for_new_tendermint_pubkey(&ctx, dhash160(&pubkey.to_bytes()).to_string(), ticker.clone()) - .await?; + { + run_db_migration_for_new_pubkey(&ctx, 
dhash160(&pubkey.to_bytes()).to_string()) + .await + .map_to_mm(|err| TendermintInitError { + ticker: ticker.clone(), + kind: TendermintInitErrorKind::Internal(err), + })?; + } TendermintActivationPolicy::with_public_key(pubkey) } else { @@ -264,19 +270,22 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { tendermint_priv_key_policy(&conf, &ticker, &private_key_policy, activation_request.path_to_address)?; #[cfg(not(target_arch = "wasm32"))] - if let PrivKeyBuildPolicy::GlobalHDAccount(_) = &private_key_policy { - let result = TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy.clone()); - let pubkey = result.public_key().map_to_mm(|e| TendermintInitError { - ticker: ticker.clone(), - kind: TendermintInitErrorKind::Internal(e.to_string()), - })?; - run_db_migration_for_new_tendermint_pubkey( - &ctx, - dhash160(&pubkey.to_bytes()).to_string(), - ticker.clone(), - ) - .await?; - }; + { + if let PrivKeyBuildPolicy::GlobalHDAccount(_) = &private_key_policy { + let result = + TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy.clone()); + let pubkey = result.public_key().map_to_mm(|e| TendermintInitError { + ticker: ticker.clone(), + kind: TendermintInitErrorKind::Internal(e.to_string()), + })?; + run_db_migration_for_new_pubkey(&ctx, dhash160(&pubkey.to_bytes()).to_string()) + .await + .map_to_mm(|err| TendermintInitError { + ticker: ticker.clone(), + kind: TendermintInitErrorKind::Internal(err), + })?; + }; + } TendermintActivationPolicy::with_private_key_policy(tendermint_private_key_policy) }; @@ -408,26 +417,3 @@ impl PlatformCoinWithTokensActivationOps for TendermintCoin { unimplemented!() } } - -#[cfg(not(target_arch = "wasm32"))] -async fn run_db_migration_for_new_tendermint_pubkey( - ctx: &MmArc, - db_id: String, - ticker: String, -) -> MmResult<(), TendermintInitError> { - info!("Public key hash: {db_id:?}"); - let mut db_migration_sender = ctx - .db_migration_watcher - .as_option() - .expect("Db migration watcher isn't intialized yet!") - .get_sender(); - db_migration_sender - .send(db_id) - .await - .map_to_mm(|err| TendermintInitError { - ticker: ticker.to_string(), - kind: TendermintInitErrorKind::Internal(err.to_string()), - })?; - - Ok(()) -} diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 6904c294ed..ffd7cda552 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -1,10 +1,12 @@ -use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmCtx}; +use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmArc, MmCtx}; use async_std::sync::RwLock as AsyncRwLock; use common::log::error; +use common::log::info; use db_common::async_sql_conn::AsyncConnection; use db_common::sqlite::rusqlite::Connection; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::lock::Mutex as AsyncMutex; +use futures::SinkExt; use gstuff::try_s; use std::collections::HashMap; use std::path::PathBuf; @@ -309,3 +311,15 @@ impl DbMigrationWatcher { pub fn get_sender(&self) -> DbMigrationSender { self.sender.clone() } } + +pub async fn run_db_migration_for_new_pubkey(ctx: &MmArc, db_id: String) -> Result<(), String> { + info!("Public key hash: {db_id:?}"); + let mut db_migration_sender = ctx + .db_migration_watcher + .as_option() + .expect("Db migration watcher isn't intialized yet!") + .get_sender(); + db_migration_sender.send(db_id).await.map_err(|err| err.to_string())?; + + Ok(()) +} diff 
--git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 113fb14ef2..bd79f3fc38 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -4,6 +4,8 @@ use std::collections::HashMap; use std::sync::{Arc, Weak}; use tokio::sync::{Mutex as AsyncMutex, OwnedMappedMutexGuard, OwnedMutexGuard, RwLock}; +const GLOBAL_DB_ID: &str = "KOMODEFI"; + /// The mapped mutex guard. /// This implements `Deref`. pub type DbLocked = OwnedMappedMutexGuard, Db>; @@ -79,11 +81,18 @@ impl ConstructibleDb { self.get_or_initialize_impl(db_id, true).await } + // handle to get or initialize global db + pub async fn get_or_intiailize_global(&self) -> InitDbResult> { + self.get_or_initialize_impl(Some(GLOBAL_DB_ID), false).await + } + /// Locks the given mutex and checks if the inner database is initialized already or not, /// initializes it if it's required, and returns the locked instance. async fn get_or_initialize_impl(&self, db_id: Option<&str>, is_shared: bool) -> InitDbResult> { - let default_id = if is_shared { &self.shared_db_id } else { &self.db_id }; - let db_id = db_id.unwrap_or(default_id).to_owned(); + let db_id = { + let default_id = if is_shared { &self.shared_db_id } else { &self.db_id }; + db_id.unwrap_or(default_id).to_owned() + }; let mut connections = self.locks.write().await; if let Some(connection) = connections.get_mut(&db_id) { @@ -105,7 +114,6 @@ impl ConstructibleDb { let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; let db = Arc::new(AsyncMutex::new(Some(db))); connections.insert(db_id, db.clone()); - // Drop connections lock as soon as possible. drop(connections); let locked_db = db.lock_owned().await; diff --git a/mm2src/mm2_main/src/lp_wallet.rs b/mm2src/mm2_main/src/lp_wallet.rs index 68e00a52c9..8b8b6d768f 100644 --- a/mm2src/mm2_main/src/lp_wallet.rs +++ b/mm2src/mm2_main/src/lp_wallet.rs @@ -96,8 +96,7 @@ impl WalletsContext { }))) } - // TODO - pub async fn wallets_db(&self) -> InitDbResult { self.wallets_db.get_or_initialize(None).await } + pub async fn wallets_db(&self) -> InitDbResult { self.wallets_db.get_or_intiailize_global().await } } // Utility function for deserialization to reduce repetition From b690e936ed194aa9d569681b1da3ac0659edd21c Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 22 Jul 2024 11:52:28 +0100 Subject: [PATCH 173/186] fix clippy --- mm2src/coins/lp_coins.rs | 326 ++++++++++++++++++--------------------- 1 file changed, 149 insertions(+), 177 deletions(-) diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index a623a81c87..497fe29a06 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -33,20 +33,13 @@ #![feature(stmt_expr_attributes)] #![feature(result_flattening)] -#[macro_use] -extern crate common; -#[macro_use] -extern crate gstuff; -#[macro_use] -extern crate lazy_static; -#[macro_use] -extern crate mm2_metrics; -#[macro_use] -extern crate serde_derive; -#[macro_use] -extern crate serde_json; -#[macro_use] -extern crate ser_error_derive; +#[macro_use] extern crate common; +#[macro_use] extern crate gstuff; +#[macro_use] extern crate lazy_static; +#[macro_use] extern crate mm2_metrics; +#[macro_use] extern crate serde_derive; +#[macro_use] extern crate serde_json; +#[macro_use] extern crate ser_error_derive; use async_trait::async_trait; use base58::FromBase58Error; @@ -64,7 +57,7 @@ use enum_derives::{EnumFromStringify, EnumFromTrait}; use ethereum_types::H256; use 
futures::compat::Future01CompatExt; use futures::lock::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; -use futures::{FutureExt, TryFutureExt}; +use futures::TryFutureExt; use futures01::Future; use hex::FromHexError; use http::{Response, StatusCode}; @@ -98,7 +91,7 @@ cfg_native! { use crate::lightning::ln_conf::PlatformCoinConfirmationTargets; use ::lightning::ln::PaymentHash as LightningPayment; use async_std::fs; - use futures::{AsyncWriteExt}; + use futures::{FutureExt, AsyncWriteExt}; use lightning_invoice::{Invoice, ParseOrSemanticError}; use std::io; use std::path::PathBuf; @@ -149,7 +142,7 @@ macro_rules! try_tx_fus_opt { return Box::new(futures01::future::err(crate::TransactionErr::Plain(ERRL!( "{:?}", $err )))); - } + }, } }; } @@ -170,7 +163,7 @@ macro_rules! try_tx_fus { TransactionEnum::from($tx), ERRL!("{:?}", err), ))); - } + }, } }; } @@ -187,7 +180,7 @@ macro_rules! try_tx_s { line!(), err ))); - } + }, } }; ($e: expr, $tx: expr) => { @@ -198,7 +191,7 @@ macro_rules! try_tx_s { TransactionEnum::from($tx), format!("{}:{}] {:?}", file!(), line!(), err), )); - } + }, } }; } @@ -228,7 +221,7 @@ macro_rules! ok_or_continue_after_sleep { error!("error {:?}", e); Timer::sleep($delay).await; continue; - } + }, } }; } @@ -240,16 +233,14 @@ pub mod coin_errors; pub mod coins_tests; pub mod eth; pub mod hd_wallet; -#[cfg(not(target_arch = "wasm32"))] -pub mod lightning; +#[cfg(not(target_arch = "wasm32"))] pub mod lightning; pub mod lp_price; #[cfg_attr(target_arch = "wasm32", allow(dead_code, unused_imports))] pub mod my_tx_history_v2; pub mod nft; pub mod qrc20; pub mod rpc_command; -#[cfg(feature = "enable-sia")] -pub mod sia; +#[cfg(feature = "enable-sia")] pub mod sia; #[doc(hidden)] #[allow(unused_variables)] #[cfg(all( @@ -301,8 +292,7 @@ use rpc_command::{get_new_address::{GetNewAddressTaskManager, GetNewAddressTaskM init_scan_for_new_addresses::{ScanAddressesTaskManager, ScanAddressesTaskManagerShared}, init_withdraw::{WithdrawTaskManager, WithdrawTaskManagerShared}}; use script::Script; -#[cfg(feature = "enable-sia")] -use sia::SiaCoin; +#[cfg(feature = "enable-sia")] use sia::SiaCoin; use tendermint::htlc::CustomTendermintMsgType; use tendermint::{CosmosTransaction, TendermintCoin, TendermintFeeDetails, TendermintProtocolInfo, TendermintToken, TendermintTokenProtocolInfo}; @@ -316,26 +306,26 @@ use utxo::utxo_standard::{utxo_standard_coin_with_policy, UtxoStandardCoin}; use utxo::{swap_proto_v2_scripts, BlockchainNetwork, GenerateTxError, UtxoActivationParams, UtxoFeeDetails, UtxoTx}; use z_coin::{ZCoin, ZcoinProtocolInfo}; -pub type TransactionFut = Box + Send>; +pub type TransactionFut = Box + Send>; pub type TransactionResult = Result; pub type BalanceResult = Result>; -pub type BalanceFut = Box> + Send>; -pub type NonZeroBalanceFut = Box> + Send>; +pub type BalanceFut = Box> + Send>; +pub type NonZeroBalanceFut = Box> + Send>; pub type NumConversResult = Result>; pub type StakingInfosResult = Result>; -pub type StakingInfosFut = Box> + Send>; +pub type StakingInfosFut = Box> + Send>; pub type DelegationResult = Result>; -pub type DelegationFut = Box> + Send>; +pub type DelegationFut = Box> + Send>; pub type WithdrawResult = Result>; -pub type WithdrawFut = Box> + Send>; +pub type WithdrawFut = Box> + Send>; pub type TradePreimageResult = Result>; -pub type TradePreimageFut = Box> + Send>; +pub type TradePreimageFut = Box> + Send>; pub type CoinFindResult = Result>; -pub type TxHistoryFut = Box> + Send>; +pub type TxHistoryFut = Box> + Send>; pub type 
TxHistoryResult = Result>; pub type RawTransactionResult = Result>; pub type RawTransactionFut<'a> = -Box> + Send + 'a>; + Box> + Send + 'a>; pub type RefundResult = Result>; /// Helper type used for swap transactions' spend preimage generation result pub type GenPreimageResult = MmResult, TxGenError>; @@ -393,7 +383,7 @@ impl HttpStatusCode for RawTransactionError { match self { RawTransactionError::InternalError(_) | RawTransactionError::SigningError(_) => { StatusCode::INTERNAL_SERVER_ERROR - } + }, RawTransactionError::NoSuchCoin { .. } | RawTransactionError::InvalidHashError(_) | RawTransactionError::HashNotExist(_) @@ -415,16 +405,7 @@ impl From for RawTransactionError { } } -#[derive( - Clone, - Debug, - Deserialize, - Display, - EnumFromStringify, - PartialEq, - Serialize, - SerializeErrorType -)] +#[derive(Clone, Debug, Deserialize, Display, EnumFromStringify, PartialEq, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum GetMyAddressError { CoinsConfCheckError(String), @@ -451,7 +432,7 @@ impl HttpStatusCode for GetMyAddressError { | GetMyAddressError::InvalidRequest(_) => StatusCode::BAD_REQUEST, GetMyAddressError::Internal(_) | GetMyAddressError::GetEthAddressError(_) => { StatusCode::INTERNAL_SERVER_ERROR - } + }, } } } @@ -591,8 +572,8 @@ pub enum PrivKeyPolicyNotAllowed { impl Serialize for PrivKeyPolicyNotAllowed { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, + where + S: Serializer, { serializer.serialize_str(&self.to_string()) } @@ -897,10 +878,10 @@ impl<'a> SwapTxTypeWithSecretHash<'a> { match self { SwapTxTypeWithSecretHash::TakerOrMakerPayment { maker_secret_hash } => { payment_script(time_lock, maker_secret_hash, my_public, other_public) - } + }, SwapTxTypeWithSecretHash::TakerFunding { taker_secret_hash } => { swap_proto_v2_scripts::taker_funding_script(time_lock, taker_secret_hash, my_public, other_public) - } + }, SwapTxTypeWithSecretHash::MakerPaymentV2 { maker_secret_hash, taker_secret_hash, @@ -913,7 +894,7 @@ impl<'a> SwapTxTypeWithSecretHash<'a> { ), SwapTxTypeWithSecretHash::TakerPaymentV2 { maker_secret_hash } => { swap_proto_v2_scripts::taker_payment_script(time_lock, maker_secret_hash, my_public, other_public) - } + }, } } @@ -1124,7 +1105,7 @@ pub trait SwapOps { fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, - ) -> Box, Error=String> + Send>; + ) -> Box, Error = String> + Send>; async fn search_for_swap_tx_spend_my( &self, @@ -1148,7 +1129,7 @@ pub trait SwapOps { /// Whether the refund transaction can be sent now /// For example: there are no additional conditions for ETH, but for some UTXO coins we should wait for /// locktime < MTP - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { + fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { let now = now_sec(); let result = if now > locktime { CanRefundHtlc::CanRefundNow @@ -1758,10 +1739,10 @@ impl From for WaitForTakerPaymentSpendError { match err { WaitForOutputSpendErr::Timeout { wait_until, now } => { WaitForTakerPaymentSpendError::Timeout { wait_until, now } - } + }, WaitForOutputSpendErr::NoOutputWithIndex(index) => { WaitForTakerPaymentSpendError::InvalidInputTx(format!("Tx doesn't have output with index {}", index)) - } + }, } } } @@ -1789,13 +1770,13 @@ impl fmt::Debug for FundingTxSpend { match self { FundingTxSpend::RefundedTimelock(tx) => { write!(f, "RefundedTimelock({:?})", tx) - } + }, FundingTxSpend::RefundedSecret { tx, secret: _ } => { write!(f, 
"RefundedSecret {{ tx: {:?} }}", tx) - } + }, FundingTxSpend::TransferredToTakerPayment(tx) => { write!(f, "TransferredToTakerPayment({:?})", tx) - } + }, } } } @@ -1939,21 +1920,21 @@ pub trait MarketCoinOps { fn platform_ticker(&self) -> &str; /// Receives raw transaction bytes in hexadecimal format as input and returns tx hash in hexadecimal format - fn send_raw_tx(&self, tx: &str) -> Box + Send>; + fn send_raw_tx(&self, tx: &str) -> Box + Send>; /// Receives raw transaction bytes as input and returns tx hash in hexadecimal format - fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send>; + fn send_raw_tx_bytes(&self, tx: &[u8]) -> Box + Send>; /// Signs raw utxo transaction in hexadecimal format as input and returns signed transaction in hexadecimal format async fn sign_raw_tx(&self, args: &SignRawTransactionRequest) -> RawTransactionResult; - fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send>; + fn wait_for_confirmations(&self, input: ConfirmPaymentInput) -> Box + Send>; fn wait_for_htlc_tx_spend(&self, args: WaitForHTLCTxSpendArgs<'_>) -> TransactionFut; fn tx_enum_from_bytes(&self, bytes: &[u8]) -> Result>; - fn current_block(&self) -> Box + Send>; + fn current_block(&self) -> Box + Send>; fn display_priv_key(&self) -> Result; @@ -2145,8 +2126,8 @@ pub enum TxFeeDetails { /// Deserialize the TxFeeDetails as an untagged enum. impl<'de> Deserialize<'de> for TxFeeDetails { fn deserialize(deserializer: D) -> Result>::Error> - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { #[derive(Deserialize)] #[serde(untagged)] @@ -2514,7 +2495,7 @@ impl TradePreimageError { available: BigDecimal::from(0), required, } - } + }, GenerateTxError::EmptyOutputs => TradePreimageError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { if is_upper_bound { @@ -2538,7 +2519,7 @@ impl TradePreimageError { let threshold = big_decimal_from_sat_unsigned(dust, decimals); TradePreimageError::AmountIsTooSmall { amount, threshold } } - } + }, GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. } => { @@ -2549,7 +2530,7 @@ impl TradePreimageError { available, required, } - } + }, GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2558,7 +2539,7 @@ impl TradePreimageError { available, required, } - } + }, GenerateTxError::Transport(e) => TradePreimageError::Transport(e), GenerateTxError::Internal(e) => TradePreimageError::InternalError(e), } @@ -2613,7 +2594,7 @@ impl From for BalanceError { AccountUpdatingError::AddressLimitReached { .. } | AccountUpdatingError::InvalidBip44Chain(_) => { // Account updating is expected to be called after `address_id` and `chain` validation. 
BalanceError::Internal(format!("Unexpected internal error: {}", error)) - } + }, AccountUpdatingError::WalletStorageError(_) => BalanceError::WalletStorageError(error), } } @@ -2648,7 +2629,7 @@ impl From for StakingInfosError { match e { UtxoRpcError::Transport(rpc) | UtxoRpcError::ResponseParseError(rpc) => { StakingInfosError::Transport(rpc.to_string()) - } + }, UtxoRpcError::InvalidResponse(error) => StakingInfosError::Transport(error), UtxoRpcError::Internal(error) => StakingInfosError::Internal(error), } @@ -2661,7 +2642,7 @@ impl From for StakingInfosError { Qrc20AddressError::UnexpectedDerivationMethod(e) => StakingInfosError::UnexpectedDerivationMethod(e), Qrc20AddressError::ScriptHashTypeNotSupported { script_hash_type } => { StakingInfosError::Internal(format!("Script hash type '{}' is not supported", script_hash_type)) - } + }, } } } @@ -2727,7 +2708,7 @@ impl From for DelegationError { match e { UtxoRpcError::Transport(transport) | UtxoRpcError::ResponseParseError(transport) => { DelegationError::Transport(transport.to_string()) - } + }, UtxoRpcError::InvalidResponse(resp) => DelegationError::Transport(resp), UtxoRpcError::Internal(internal) => DelegationError::InternalError(internal), } @@ -2739,12 +2720,12 @@ impl From for DelegationError { match e { StakingInfosError::CoinDoesntSupportStakingInfos { coin } => { DelegationError::CoinDoesntSupportDelegation { coin } - } + }, StakingInfosError::NoSuchCoin { coin } => DelegationError::NoSuchCoin { coin }, StakingInfosError::Transport(e) => DelegationError::Transport(e), StakingInfosError::UnexpectedDerivationMethod(reason) => { DelegationError::DelegationOpsNotSupported { reason } - } + }, StakingInfosError::Internal(e) => DelegationError::InternalError(e), } } @@ -2764,7 +2745,7 @@ impl From for DelegationError { BalanceError::Transport(error) | BalanceError::InvalidResponse(error) => DelegationError::Transport(error), BalanceError::UnexpectedDerivationMethod(e) => { DelegationError::DelegationOpsNotSupported { reason: e.to_string() } - } + }, e @ BalanceError::WalletStorageError(_) => DelegationError::InternalError(e.to_string()), BalanceError::Internal(internal) => DelegationError::InternalError(internal), } @@ -2808,13 +2789,13 @@ impl DelegationError { available: BigDecimal::from(0), required, } - } + }, GenerateTxError::EmptyOutputs => DelegationError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { let amount = big_decimal_from_sat_unsigned(value, decimals); let threshold = big_decimal_from_sat_unsigned(dust, decimals); DelegationError::AmountTooLow { amount, threshold } - } + }, GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -2825,7 +2806,7 @@ impl DelegationError { available, required, } - } + }, GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -2834,23 +2815,14 @@ impl DelegationError { available, required, } - } + }, GenerateTxError::Transport(e) => DelegationError::Transport(e), GenerateTxError::Internal(e) => DelegationError::InternalError(e), } } } -#[derive( - Clone, - Debug, - Display, - EnumFromStringify, - EnumFromTrait, - PartialEq, - Serialize, - SerializeErrorType -)] +#[derive(Clone, Debug, Display, EnumFromStringify, EnumFromTrait, PartialEq, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum WithdrawError { #[display( @@ -2922,10 +2894,10 @@ pub enum WithdrawError { Transport(String), #[from_trait(WithInternal::internal)] #[from_stringify( - "MyAddressError", - "NumConversError", - "UnexpectedDerivationMethod", - "PrivKeyPolicyNotAllowed" + "MyAddressError", + "NumConversError", + "UnexpectedDerivationMethod", + "PrivKeyPolicyNotAllowed" )] #[display(fmt = "Internal error: {}", _0)] InternalError(String), @@ -3013,7 +2985,7 @@ impl HttpStatusCode for WithdrawError { WithdrawError::BroadcastExpected(_) => StatusCode::BAD_REQUEST, WithdrawError::InternalError(_) | WithdrawError::DbError(_) | WithdrawError::NftProtocolNotSupported => { StatusCode::INTERNAL_SERVER_ERROR - } + }, WithdrawError::Transport(_) => StatusCode::BAD_GATEWAY, } } @@ -3024,7 +2996,7 @@ impl From for WithdrawError { match e { AddressDerivingError::InvalidBip44Chain { .. } | AddressDerivingError::Bip32Error(_) => { WithdrawError::UnexpectedFromAddress(e.to_string()) - } + }, AddressDerivingError::Internal(internal) => WithdrawError::InternalError(internal), } } @@ -3076,7 +3048,7 @@ impl From for WithdrawError { match e { GetValidEthWithdrawAddError::CoinDoesntSupportNftWithdraw { coin } => { WithdrawError::CoinDoesntSupportNftWithdraw { coin } - } + }, GetValidEthWithdrawAddError::InvalidAddress(e) => WithdrawError::InvalidAddress(e), } } @@ -3111,13 +3083,13 @@ impl WithdrawError { available: BigDecimal::from(0), required, } - } + }, GenerateTxError::EmptyOutputs => WithdrawError::InternalError(gen_tx_err.to_string()), GenerateTxError::OutputValueLessThanDust { value, dust } => { let amount = big_decimal_from_sat_unsigned(value, decimals); let threshold = big_decimal_from_sat_unsigned(dust, decimals); WithdrawError::AmountTooLow { amount, threshold } - } + }, GenerateTxError::DeductFeeFromOutputFailed { output_value, required, .. 
} => { @@ -3128,7 +3100,7 @@ impl WithdrawError { available, required, } - } + }, GenerateTxError::NotEnoughUtxos { sum_utxos, required } => { let available = big_decimal_from_sat_unsigned(sum_utxos, decimals); let required = big_decimal_from_sat_unsigned(required, decimals); @@ -3137,7 +3109,7 @@ impl WithdrawError { available, required, } - } + }, GenerateTxError::Transport(e) => WithdrawError::Transport(e), GenerateTxError::Internal(e) => WithdrawError::InternalError(e), } @@ -3208,10 +3180,10 @@ impl From for VerificationError { match e { FromBase58Error::InvalidBase58Character(c, _) => { VerificationError::AddressDecodingError(format!("Invalid Base58 Character: {}", c)) - } + }, FromBase58Error::InvalidBase58Length => { VerificationError::AddressDecodingError(String::from("Invalid Base58 Length")) - } + }, } } } @@ -3219,7 +3191,7 @@ impl From for VerificationError { /// NB: Implementations are expected to follow the pImpl idiom, providing cheap reference-counted cloning and garbage collection. #[async_trait] pub trait MmCoin: -SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + Send + Sync + 'static + SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + Send + Sync + 'static { // `MmCoin` is an extension fulcrum for something that doesn't fit the `MarketCoinOps`. Practical examples: // name (might be required for some APIs, CoinMarketCap for instance); @@ -3257,7 +3229,7 @@ SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + fn validate_address(&self, address: &str) -> ValidateAddressResult; /// Loop collecting coin transaction history and saving it to local DB - fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; + fn process_history_loop(&self, ctx: MmArc) -> Box + Send>; /// Retrieves a unique identifier for the account based on the coin's derivation method. /// E.g, If the coin is derived from an HD wallet, it uses the public key hash of the enabled address as the database ID. @@ -3320,7 +3292,7 @@ SwapOps + TakerSwapMakerCoin + MakerSwapTakerCoin + WatcherOps + MarketCoinOps + fn history_sync_status(&self) -> HistorySyncState; /// Get fee to be paid per 1 swap transaction - fn get_trade_fee(&self) -> Box + Send>; + fn get_trade_fee(&self) -> Box + Send>; /// Get fee to be paid by sender per whole swap (including possible refund) using the sending value and check if the wallet has sufficient balance to pay the fee. 
async fn get_sender_trade_fee( @@ -3402,8 +3374,8 @@ impl CoinFutSpawner { impl SpawnFuture for CoinFutSpawner { fn spawn(&self, f: F) - where - F: Future03 + Send + 'static, + where + F: Future03 + Send + 'static, { self.inner.spawn(f) } @@ -3411,8 +3383,8 @@ impl SpawnFuture for CoinFutSpawner { impl SpawnAbortable for CoinFutSpawner { fn spawn_with_settings(&self, fut: F, settings: AbortSettings) - where - F: Future03 + Send + 'static, + where + F: Future03 + Send + 'static, { self.inner.spawn_with_settings(fut, settings) } @@ -4026,16 +3998,16 @@ pub async fn extract_extended_pubkey_impl( xpub_extractor: Option, derivation_path: DerivationPath, ) -> MmResult - where - XPubExtractor: HDXPubExtractor + Send, - Coin: HDWalletCoinOps + CoinWithPrivKeyPolicy, +where + XPubExtractor: HDXPubExtractor + Send, + Coin: HDWalletCoinOps + CoinWithPrivKeyPolicy, { match xpub_extractor { Some(xpub_extractor) => { let trezor_coin = coin.trezor_coin()?; let xpub = xpub_extractor.extract_xpub(trezor_coin, derivation_path).await?; Secp256k1ExtendedPublicKey::from_str(&xpub).map_to_mm(|e| HDExtractPubkeyError::InvalidXpub(e.to_string())) - } + }, None => { let mut priv_key = coin .priv_key_policy() @@ -4049,7 +4021,7 @@ pub async fn extract_extended_pubkey_impl( } drop_mutability!(priv_key); Ok(priv_key.public_key()) - } + }, } } @@ -4092,9 +4064,9 @@ pub enum DerivationMethodResponse { /// 2. A hierarchical deterministic (HD) wallet that can derive multiple addresses. #[derive(Debug)] pub enum DerivationMethod - where - HDWallet: HDWalletOps, - HDWalletAddress: Into
, +where + HDWallet: HDWalletOps, + HDWalletAddress: Into
, { /// Represents the use of a single, static address for transactions and operations. SingleAddress(Address), @@ -4107,17 +4079,17 @@ pub enum DerivationMethod } impl DerivationMethod - where - Address: Clone, - HDWallet: HDWalletOps, - HDWalletAddress: Into
, +where + Address: Clone, + HDWallet: HDWalletOps, + HDWalletAddress: Into
, { pub async fn single_addr(&self) -> Option
{ match self { DerivationMethod::SingleAddress(my_address) => Some(my_address.clone()), DerivationMethod::HDWallet(hd_wallet) => { hd_wallet.get_enabled_address().await.map(|addr| addr.address().into()) - } + }, } } @@ -4155,7 +4127,7 @@ impl DerivationMethod Ok(DerivationMethodResponse::HDWallet( enabled_address.derivation_path().to_string(), )) - } + }, } } } @@ -4215,7 +4187,7 @@ pub trait CoinWithDerivationMethod: HDWalletCoinOps { } Ok(all_addresses) - } + }, } } } @@ -4467,14 +4439,14 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { let params = try_s!(UtxoActivationParams::from_legacy_req(req)); try_s!(utxo_standard_coin_with_policy(ctx, ticker, &coins_en, ¶ms, priv_key_policy).await).into() - } + }, CoinProtocol::QTUM => { let params = try_s!(UtxoActivationParams::from_legacy_req(req)); try_s!(qtum_coin_with_policy(ctx, ticker, &coins_en, ¶ms, priv_key_policy).await).into() - } + }, CoinProtocol::ETH | CoinProtocol::ERC20 { .. } => { try_s!(eth_coin_from_conf_and_request(ctx, ticker, &coins_en, req, protocol, priv_key_policy).await).into() - } + }, CoinProtocol::QRC20 { platform, contract_address, @@ -4494,15 +4466,15 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { let prefix = try_s!(CashAddrPrefix::from_str(slp_prefix)); let params = try_s!(BchActivationRequest::from_legacy_req(req)); let bch = try_s!(bch_coin_with_policy(ctx, ticker, &coins_en, params, prefix, priv_key_policy).await); bch.into() - } + }, CoinProtocol::SLPTOKEN { platform, token_id, @@ -4525,7 +4497,7 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result return ERR!("TENDERMINT protocol is not supported by lp_coininit"), CoinProtocol::TENDERMINTTOKEN(_) => return ERR!("TENDERMINTTOKEN protocol is not supported by lp_coininit"), CoinProtocol::ZHTLC { .. } => return ERR!("ZHTLC protocol is not supported by lp_coininit"), @@ -4535,15 +4507,15 @@ pub async fn lp_coininit(ctx: &MmArc, ticker: &str, req: &Json) -> Result { return ERR!("Solana protocol is not supported by lp_coininit - use enable_solana_with_tokens instead"); - } + }, #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SPLTOKEN { .. } => { return ERR!("SplToken protocol is not supported by lp_coininit - use enable_spl instead"); - } + }, #[cfg(feature = "enable-sia")] CoinProtocol::SIA { .. } => { return ERR!("SIA protocol is not supported by lp_coininit. 
Use task::enable_sia::init"); - } + }, }; let register_params = RegisterCoinParams { @@ -4587,7 +4559,7 @@ pub async fn lp_register_coin( match coins.raw_entry_mut().from_key(&ticker) { RawEntryMut::Occupied(_oe) => { return MmError::err(RegisterCoinError::CoinIsInitializedAlready { coin: ticker.clone() }); - } + }, RawEntryMut::Vacant(ve) => ve.insert(ticker.clone(), MmCoinStruct::new(coin.clone())), }; @@ -4796,7 +4768,7 @@ pub async fn remove_delegation(ctx: MmArc, req: RemoveDelegateRequest) -> Delega return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), }); - } + }, } } @@ -4808,7 +4780,7 @@ pub async fn get_staking_infos(ctx: MmArc, req: GetStakingInfosRequest) -> Staki return MmError::err(StakingInfosError::CoinDoesntSupportStakingInfos { coin: coin.ticker().to_string(), }); - } + }, } } @@ -4821,7 +4793,7 @@ pub async fn add_delegation(ctx: MmArc, req: AddDelegateRequest) -> DelegationRe return MmError::err(DelegationError::CoinDoesntSupportDelegation { coin: coin.ticker().to_string(), }); - } + }, }; match req.staking_details { StakingDetails::Qtum(qtum_staking) => coin_concrete.add_delegation(qtum_staking).compat().await, @@ -4888,7 +4860,7 @@ pub async fn my_tx_history(ctx: MmArc, req: Json) -> Result>, S .position(|item| item.internal_id == *id) .ok_or(format!("from_id {:02x} is not found", id))) + 1 - } + }, None => match request.page_number { Some(page_n) => (page_n.get() - 1) * request.limit, None => 0, @@ -5070,7 +5042,7 @@ pub fn update_coins_config(mut config: Json) -> Result { contract_address, } } - } + }, _ => CoinProtocol::UTXO, }; @@ -5119,7 +5091,7 @@ pub fn address_by_coin_conf_and_pubkey_str( CoinProtocol::ERC20 { .. } | CoinProtocol::ETH | CoinProtocol::NFT { .. } => eth::addr_from_pubkey_str(pubkey), CoinProtocol::UTXO | CoinProtocol::QTUM | CoinProtocol::QRC20 { .. } | CoinProtocol::BCH { .. } => { utxo::address_by_conf_and_pubkey_str(coin, conf, pubkey, addr_format) - } + }, CoinProtocol::SLPTOKEN { platform, .. } => { let platform_conf = coin_conf(ctx, &platform); if platform_conf.is_null() { @@ -5130,10 +5102,10 @@ pub fn address_by_coin_conf_and_pubkey_str( match platform_protocol { CoinProtocol::BCH { slp_prefix } => { slp_addr_from_pubkey_str(pubkey, &slp_prefix).map_err(|e| ERRL!("{}", e)) - } + }, _ => ERR!("Platform protocol {:?} is not BCH", platform_protocol), } - } + }, CoinProtocol::TENDERMINT(protocol) => tendermint::account_id_from_pubkey_hex(&protocol.account_prefix, pubkey) .map(|id| id.to_string()) .map_err(|e| e.to_string()), @@ -5149,18 +5121,18 @@ pub fn address_by_coin_conf_and_pubkey_str( tendermint::account_id_from_pubkey_hex(&platform.account_prefix, pubkey) .map(|id| id.to_string()) .map_err(|e| e.to_string()) - } + }, _ => ERR!("Platform protocol {:?} is not TENDERMINT", platform_protocol), } - } + }, #[cfg(not(target_arch = "wasm32"))] CoinProtocol::LIGHTNING { .. } => { ERR!("address_by_coin_conf_and_pubkey_str is not implemented for lightning protocol yet!") - } + }, #[cfg(all(feature = "enable-solana", not(target_arch = "wasm32")))] CoinProtocol::SOLANA | CoinProtocol::SPLTOKEN { .. } => { ERR!("Solana pubkey is the public address - you do not need to use this rpc call.") - } + }, CoinProtocol::ZHTLC { .. } => ERR!("address_by_coin_conf_and_pubkey_str is not supported for ZHTLC protocol!"), #[cfg(feature = "enable-sia")] CoinProtocol::SIA { .. 
} => ERR!("address_by_coin_conf_and_pubkey_str is not supported for SIA protocol!"), // TODO Alright @@ -5173,8 +5145,8 @@ async fn load_history_from_file_impl( ctx: &MmArc, db_id: Option<&str>, ) -> TxHistoryResult> - where - T: MmCoin + ?Sized, +where + T: MmCoin + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); @@ -5206,8 +5178,8 @@ async fn load_history_from_file_impl( ctx: &MmArc, db_id: Option<&str>, ) -> TxHistoryResult> - where - T: MmCoin + ?Sized, +where + T: MmCoin + ?Sized, { let ticker = coin.ticker().to_owned(); let history_path = coin.tx_history_path(ctx, db_id); @@ -5218,7 +5190,7 @@ async fn load_history_from_file_impl( Ok(content) => content, Err(err) if err.kind() == io::ErrorKind::NotFound => { return Ok(Vec::new()); - } + }, Err(err) => { let error = format!( "Error '{}' reading from the history file {}", @@ -5226,7 +5198,7 @@ async fn load_history_from_file_impl( history_path.display() ); return MmError::err(TxHistoryError::ErrorLoading(error)); - } + }, }; let serde_err = match json::from_slice(&content) { Ok(txs) => return Ok(txs), @@ -5243,7 +5215,7 @@ async fn load_history_from_file_impl( .map_to_mm(|e| TxHistoryError::ErrorClearing(e.to_string()))?; Ok(Vec::new()) } - .await + .await } #[cfg(target_arch = "wasm32")] @@ -5252,8 +5224,8 @@ async fn save_history_to_file_impl( ctx: &MmArc, mut history: Vec, ) -> TxHistoryResult<()> - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let ctx = ctx.clone(); let ticker = coin.ticker().to_owned(); @@ -5271,8 +5243,8 @@ async fn save_history_to_file_impl( #[cfg(not(target_arch = "wasm32"))] fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc, db_id: Option<&str>) -> TxHistoryFut - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let migration_path = coin.tx_migration_path(ctx, db_id); @@ -5286,7 +5258,7 @@ fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc, db_id: Option<&str>) } else { 0 } - } + }, Err(_) => 0, }; @@ -5298,8 +5270,8 @@ fn get_tx_history_migration_impl(coin: &T, ctx: &MmArc, db_id: Option<&str>) #[cfg(not(target_arch = "wasm32"))] fn update_migration_file_impl(coin: &T, ctx: &MmArc, migration_number: u64, db_id: Option<&str>) -> TxHistoryFut<()> - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let migration_path = coin.tx_migration_path(ctx, db_id); let tmp_file = format!("{}.tmp", migration_path.display()); @@ -5330,8 +5302,8 @@ async fn save_history_to_file_impl( ctx: &MmArc, mut history: Vec, ) -> TxHistoryResult<()> - where - T: MmCoin + MarketCoinOps + ?Sized, +where + T: MmCoin + MarketCoinOps + ?Sized, { let history_path = coin.tx_history_path(ctx, coin.account_db_id().await.as_deref()); let tmp_file = format!("{}.tmp", history_path.display()); @@ -5357,7 +5329,7 @@ async fn save_history_to_file_impl( Ok(()) } - .await + .await } pub(crate) fn compare_transaction_details(a: &TransactionDetails, b: &TransactionDetails) -> Ordering { @@ -5376,8 +5348,8 @@ impl TxIdHeight { } pub(crate) fn compare_transactions(a: TxIdHeight, b: TxIdHeight) -> Ordering - where - Id: Ord, +where + Id: Ord, { // the transactions with block_height == 0 are the most recent so we need to separately handle them while sorting if a.block_height == b.block_height { @@ -5417,7 +5389,7 @@ pub async fn get_my_address(ctx: MmArc, req: MyAddressReq) -> MmResult { eth_coin.set_swap_transaction_fee_policy(req.swap_tx_fee_policy); Ok(eth_coin.get_swap_transaction_fee_policy()) - } 
+ }, MmCoinEnum::Qrc20Coin(qrc20_coin) => { qrc20_coin.set_swap_transaction_fee_policy(req.swap_tx_fee_policy); Ok(qrc20_coin.get_swap_transaction_fee_policy()) - } + }, _ => MmError::err(SwapTxFeePolicyError::NotSupported(req.coin)), } } @@ -5502,8 +5474,8 @@ pub async fn scan_for_new_addresses_impl( chain: Bip44Chain, gap_limit: u32, ) -> BalanceResult>>> - where - T: HDWalletBalanceOps + Sync, +where + T: HDWalletBalanceOps + Sync, { let mut balances = Vec::with_capacity(gap_limit as usize); @@ -5550,7 +5522,7 @@ pub async fn scan_for_new_addresses_impl( }); // Reset the counter of unused addresses to zero since we found a non-empty address. unused_addresses_counter = 0; - } + }, AddressBalanceStatus::NotUsed => unused_addresses_counter += 1, } @@ -5563,7 +5535,7 @@ pub async fn scan_for_new_addresses_impl( chain, checking_address_id - unused_addresses_counter, ) - .await?; + .await?; Ok(balances) } @@ -5670,7 +5642,7 @@ pub mod for_tests { task_id: init.task_id, forget_if_finished: true, }) - .await; + .await; if let Ok(status) = status { match status { RpcTaskStatus::Ok(tx_details) => break Ok(tx_details), From 1675a53752aaa5d3a01a5f88566592b42264f990 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 24 Jul 2024 18:51:28 +0100 Subject: [PATCH 174/186] fix review notes and remove left over --- mm2src/coins/eth/v2_activation.rs | 13 ++++++++----- mm2src/mm2_db/src/indexed_db/indexed_db.rs | 18 +++--------------- mm2src/mm2_main/src/lp_native_dex.rs | 15 ++++++++++++--- mm2src/mm2_main/src/lp_ordermatch.rs | 8 ++++++++ 4 files changed, 31 insertions(+), 23 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 34421d0fd5..6a7328931a 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -742,7 +742,9 @@ pub(crate) async fn build_address_and_priv_key_policy( #[cfg(not(target_arch = "wasm32"))] { - let pubkey = dhash160(activated_key.public().as_bytes()).to_string(); + // Skip the first byte of the uncompressed public key before converting to the eth address. + let pubkey = Public::from_slice(&activated_key.public().as_bytes()[1..]); + let pubkey = public_to_address(&pubkey).to_string(); run_db_migration_for_new_pubkey(ctx, pubkey) .await .map_to_mm(EthActivationV2Error::InternalError)?; @@ -1013,10 +1015,11 @@ pub(super) async fn eth_shared_db_id(coin: &EthCoin, ctx: &MmArc) -> Option Option { match coin.derivation_method() { - DerivationMethod::HDWallet(hd_wallet) => hd_wallet - .get_enabled_address() - .await - .map(|addr| dhash160(addr.pubkey().as_bytes()).to_string()), + DerivationMethod::HDWallet(hd_wallet) => hd_wallet.get_enabled_address().await.map(|addr| { + // Skip the first byte of the uncompressed public key before converting to the eth address. 
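Editor's note on the eth/v2_activation.rs hunk above: the "Skip the first byte of the uncompressed public key" comment refers to standard Ethereum address derivation — the 0x04 tag byte of the 65-byte uncompressed secp256k1 key is dropped, the remaining 64 bytes are keccak-256 hashed, and the last 20 bytes of the digest become the address. The sketch below only illustrates that transformation using the tiny_keccak crate; it is not how the patch does it (the patch relies on the repo's Public::from_slice, public_to_address and display_eth_address helpers).

// Editor's sketch (not part of the patch): ETH address from an uncompressed secp256k1 pubkey.
use tiny_keccak::{Hasher, Keccak};

fn eth_address_from_uncompressed_pubkey(pubkey: &[u8; 65]) -> [u8; 20] {
    let mut hasher = Keccak::v256();
    // Drop the 0x04 prefix so only the 64-byte (x, y) coordinates are hashed.
    hasher.update(&pubkey[1..]);
    let mut digest = [0u8; 32];
    hasher.finalize(&mut digest);
    // The address is the low 20 bytes of the keccak-256 digest.
    let mut address = [0u8; 20];
    address.copy_from_slice(&digest[12..]);
    address
}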
+ let pubkey = Public::from_slice(&addr.pubkey().as_bytes()[1..]); + public_to_address(&pubkey).to_string() + }), _ => None, } } diff --git a/mm2src/mm2_db/src/indexed_db/indexed_db.rs b/mm2src/mm2_db/src/indexed_db/indexed_db.rs index 75f2b6a821..21672fbbf8 100644 --- a/mm2src/mm2_db/src/indexed_db/indexed_db.rs +++ b/mm2src/mm2_db/src/indexed_db/indexed_db.rs @@ -111,28 +111,20 @@ impl DbIdentifier { } pub fn display_db_id(&self) -> String { self.db_id.clone().unwrap_or_else(|| "KOMODEFI".to_string()) } - - pub fn db_id(&self) -> String { - self.db_id - .clone() - .unwrap_or_else(|| hex::encode(H160::default().as_slice())) - } } pub struct IndexedDbBuilder { pub db_name: String, pub db_version: u32, pub tables: HashMap, - pub db_id: String, } impl IndexedDbBuilder { - pub fn new(db_id: DbIdentifier) -> IndexedDbBuilder { + pub fn new(db_ident: DbIdentifier) -> IndexedDbBuilder { IndexedDbBuilder { - db_name: db_id.to_string(), + db_name: db_ident.to_string(), db_version: 1, tables: HashMap::new(), - db_id: db_id.db_id(), } } @@ -148,13 +140,12 @@ impl IndexedDbBuilder { } pub async fn build(self) -> InitDbResult { - let db_id = self.db_id.clone(); let (init_tx, init_rx) = oneshot::channel(); let (event_tx, event_rx) = mpsc::unbounded(); self.init_and_spawn(init_tx, event_rx); init_rx.await.expect("The init channel must not be closed")?; - Ok(IndexedDb { event_tx, db_id }) + Ok(IndexedDb { event_tx }) } fn init_and_spawn( @@ -190,7 +181,6 @@ impl IndexedDbBuilder { pub struct IndexedDb { event_tx: DbEventTx, - db_id: String, } async fn send_event_recv_response( @@ -248,8 +238,6 @@ impl IndexedDb { // ignore if the receiver is closed result_tx.send(Ok(transaction_event_tx)).ok(); } - - pub fn get_db_id(&self) -> String { self.db_id.to_string() } } pub struct DbTransaction<'transaction> { diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 959ab37f81..4eea14dc37 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -460,7 +460,7 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { let mut migrations = HashSet::new(); let mut receiver = ctx .init_db_migration_watcher() - .expect("db_m igration_watcher initialization failed"); + .expect("db_migration_watcher initialization failed"); while let Some(db_id) = receiver.next().await { if migrations.contains(&db_id) { @@ -468,13 +468,13 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { continue; } - // run db migration for db_id if new activated pubkey is unique. + // run db migration for new db_id. if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), None).await { error!("{err:?}"); continue; }; - // insert new db_id to migration list + // insert new db_id to migrated list migrations.insert(db_id.to_owned()); // Fetch and extend ctx.coins_needed_for_kick_start from new intialized db. 
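Editor's note on init_db_migration_watcher_loop above: the loop drains account ids from a channel and guarantees the per-account migration runs at most once by remembering completed ids in a HashSet. A minimal stand-alone sketch of that shape, where run_migration is only a placeholder for the real run_db_migration_impl:

// Editor's sketch (not part of the patch): dedup-and-migrate loop over a channel of db ids.
use std::collections::HashSet;
use futures::{channel::mpsc, StreamExt};

async fn watch_db_migrations(mut receiver: mpsc::UnboundedReceiver<String>) {
    let mut migrated: HashSet<String> = HashSet::new();
    while let Some(db_id) = receiver.next().await {
        if migrated.contains(&db_id) {
            // Already migrated for this account key; nothing to do.
            continue;
        }
        if let Err(err) = run_migration(&db_id).await {
            eprintln!("db migration for {db_id} failed: {err}");
            continue;
        }
        // Remember the id so repeated activations of the same pubkey are no-ops.
        migrated.insert(db_id);
    }
}

async fn run_migration(_db_id: &str) -> Result<(), String> {
    // Placeholder for the per-account schema migration work.
    Ok(())
}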
@@ -500,6 +500,15 @@ async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, shared_db_id: O } pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { + #[cfg(not(target_arch = "wasm32"))] + { + let dbdir = ctx.dbdir(None); + fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { + path: dbdir.clone(), + error: e.to_string(), + })?; + } + init_ordermatch_context(&ctx)?; init_p2p(ctx.clone()).await?; diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 18ce367371..6f0586ea29 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -5428,6 +5428,10 @@ pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result) -> Result Date: Sun, 28 Jul 2024 16:16:35 +0100 Subject: [PATCH 175/186] use display_eth_address --- mm2src/coins/eth/v2_activation.rs | 4 ++-- mm2src/mm2_main/src/lp_native_dex.rs | 17 ++++++++--------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 6a7328931a..fe508c9ecf 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -745,7 +745,7 @@ pub(crate) async fn build_address_and_priv_key_policy( // Skip the first byte of the uncompressed public key before converting to the eth address. let pubkey = Public::from_slice(&activated_key.public().as_bytes()[1..]); let pubkey = public_to_address(&pubkey).to_string(); - run_db_migration_for_new_pubkey(ctx, pubkey) + run_db_migration_for_new_pubkey(ctx, display_eth_address(&pubkey)) .await .map_to_mm(EthActivationV2Error::InternalError)?; } @@ -1018,7 +1018,7 @@ pub(super) async fn eth_account_db_id(coin: &EthCoin) -> Option { DerivationMethod::HDWallet(hd_wallet) => hd_wallet.get_enabled_address().await.map(|addr| { // Skip the first byte of the uncompressed public key before converting to the eth address. let pubkey = Public::from_slice(&addr.pubkey().as_bytes()[1..]); - public_to_address(&pubkey).to_string() + display_eth_address(&public_to_address(&pubkey)) }), _ => None, } diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 4eea14dc37..fea6db6599 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -500,15 +500,6 @@ async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, shared_db_id: O } pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { - #[cfg(not(target_arch = "wasm32"))] - { - let dbdir = ctx.dbdir(None); - fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { - path: dbdir.clone(), - error: e.to_string(), - })?; - } - init_ordermatch_context(&ctx)?; init_p2p(ctx.clone()).await?; @@ -549,6 +540,14 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitResult<()> { info!("Version: {} DT {}", version, datetime); + #[cfg(not(target_arch = "wasm32"))] + { + let dbdir = ctx.dbdir(None); + fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { + path: dbdir.clone(), + error: e.to_string(), + })?; + } // This either initializes the cryptographic context or sets up the context for "no login mode". 
initialize_wallet_passphrase(&ctx).await?; From 5f72e83ba2b28dce7cea5e3f2a21689b0de0c7ce Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Mon, 29 Jul 2024 11:30:08 +0100 Subject: [PATCH 176/186] minor changes and improvements --- mm2src/coins/eth/v2_activation.rs | 2 +- mm2src/kdf_wc_client/Cargo.toml | 46 ++++++++++++++++++++ mm2src/mm2_core/src/sql_connection_pool.rs | 49 ++++++++++++---------- mm2src/mm2_main/src/lp_native_dex.rs | 12 +++--- mm2src/mm2_main/src/lp_ordermatch.rs | 8 ---- 5 files changed, 81 insertions(+), 36 deletions(-) create mode 100644 mm2src/kdf_wc_client/Cargo.toml diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index fe508c9ecf..605740e110 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -744,7 +744,7 @@ pub(crate) async fn build_address_and_priv_key_policy( { // Skip the first byte of the uncompressed public key before converting to the eth address. let pubkey = Public::from_slice(&activated_key.public().as_bytes()[1..]); - let pubkey = public_to_address(&pubkey).to_string(); + let pubkey = public_to_address(&pubkey); run_db_migration_for_new_pubkey(ctx, display_eth_address(&pubkey)) .await .map_to_mm(EthActivationV2Error::InternalError)?; diff --git a/mm2src/kdf_wc_client/Cargo.toml b/mm2src/kdf_wc_client/Cargo.toml new file mode 100644 index 0000000000..fe209dbda8 --- /dev/null +++ b/mm2src/kdf_wc_client/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "kdf_wc_client" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +cfg-if = "1.0" +common = { path = "../common" } +data-encoding = "2.6.0" +derive_more = "0.99" +enum_derives = { path = "../derives/enum_derives" } +futures = { version = "0.3", package = "futures", features = [ + "compat", + "async-await", +] } +futures-util = { version = "0.3", default-features = false, features = [ + "sink", + "std", +] } +http = "1.0.0" +mm2_core = { path = "../mm2_core" } +url = "2.3" +relay_rpc = { git = "https://github.com/borngraced/WalletConnectRust.git" } +rand = { version = "0.8.5", features = ["std", "small_rng"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +serde_qs = "0.10" +chrono = { version = "0.4", default-features = false, features = [ + "alloc", + "std", + "wasmbind", +] } +tokio = { version = "1.22", features = ["sync", "macros"] } +tokio-tungstenite-wasm = { git = "https://github.com/KomodoPlatform/tokio-tungstenite-wasm.git", rev = "8fc7e2f", features = [ + "rustls-tls-native-roots", +] } +pin-project = "1.1.2" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +js-sys = "0.3.27" +wasm-bindgen = "0.2.86" +wasm-bindgen-test = { version = "0.3.2" } +wasm-bindgen-futures = "0.4.21" +web-sys = { version = "0.3.55", features = ["WebSocket"] } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index ffd7cda552..4f4698dfae 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -40,8 +40,8 @@ impl SqliteConnPool { } /// Initializes a shared database connection. - pub fn init_shared(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { - Self::init_impl(ctx, db_id, DbIdConnKind::Shared) + pub fn init_shared(ctx: &MmCtx) -> Result<(), String> { + Self::init_impl(ctx, None, DbIdConnKind::Shared) } /// Internal implementation to initialize a database connection. 
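Editor's note on the sql_connection_pool.rs hunks around this point: the change replaces explicit drop(conns) calls with a nested block, so the read guard is released before the write lock is taken. A minimal sketch of that guard-scoping idea over a plain std::sync::RwLock (the real pool also uses async locks and opens actual SQLite connections; the String value here is just a stand-in):

// Editor's sketch (not part of the patch): scoped read guard before acquiring the write lock.
use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};

fn get_or_insert(
    pool: &RwLock<HashMap<String, Arc<Mutex<String>>>>,
    db_id: &str,
) -> Arc<Mutex<String>> {
    {
        // Fast path: shared lock only; the guard is dropped at the end of this block.
        let connections = pool.read().unwrap();
        if let Some(conn) = connections.get(db_id) {
            return Arc::clone(conn);
        }
    }
    // Slow path: exclusive lock, re-check, then insert if still missing.
    let mut connections = pool.write().unwrap();
    connections
        .entry(db_id.to_owned())
        .or_insert_with(|| Arc::new(Mutex::new(format!("connection for {db_id}"))))
        .clone()
}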
@@ -54,11 +54,12 @@ impl SqliteConnPool { // Connection pool is already initialized, insert new connection. if let Some(pool) = ctx.sqlite_conn_pool.as_option() { - let conns = pool.connections.read().unwrap(); - if conns.get(&db_id).is_some() { - return Ok(()); + { + let conns = pool.connections.read().unwrap(); + if conns.get(&db_id).is_some() { + return Ok(()); + } } - drop(conns); let conn = Self::open_connection(sqlite_file_path); let mut pool = pool.connections.write().unwrap(); @@ -124,11 +125,12 @@ impl SqliteConnPool { /// Internal implementation to retrieve or create a connection. fn sqlite_conn_impl(&self, db_id: Option<&str>, kind: DbIdConnKind) -> Arc> { let db_id = self.db_id(db_id, &kind); - let connections = self.connections.read().unwrap(); - if let Some(connection) = connections.get(&db_id) { - return Arc::clone(connection); + { + let connections = self.connections.read().unwrap(); + if let Some(connection) = connections.get(&db_id) { + return Arc::clone(connection); + } } - drop(connections); let mut connections = self.connections.write().unwrap(); let sqlite_file_path = self.sqlite_file_path(&db_id, &kind); @@ -201,9 +203,11 @@ impl AsyncSqliteConnPool { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { - let conns = pool.connections.read().await; - if conns.get(&db_id).is_some() { - return Ok(()); + { + let conns = pool.connections.read().await; + if conns.get(&db_id).is_some() { + return Ok(()); + } } let conn = Self::open_connection(&pool.sqlite_file_path).await; @@ -230,11 +234,12 @@ impl AsyncSqliteConnPool { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { - let conns = pool.connections.read().await; - if conns.get(&db_id).is_some() { - return Ok(()); + { + let conns = pool.connections.read().await; + if conns.get(&db_id).is_some() { + return Ok(()); + } } - drop(conns); let mut pool = pool.connections.write().await; let conn = Arc::new(AsyncMutex::new(AsyncConnection::open_in_memory().await.unwrap())); @@ -260,10 +265,12 @@ impl AsyncSqliteConnPool { pub async fn async_sqlite_conn(&self, db_id: Option<&str>) -> Arc> { let db_id = db_id.unwrap_or(&self.default_db_id); - let connections = self.connections.read().await; - if let Some(connection) = connections.get(db_id) { - return Arc::clone(connection); - }; + { + let connections = self.connections.read().await; + if let Some(connection) = connections.get(db_id) { + return Arc::clone(connection); + }; + } let mut connections = self.connections.write().await; let connection = Self::open_connection(&self.sqlite_file_path).await; diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index fea6db6599..1028cc23ed 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -469,7 +469,7 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { } // run db migration for new db_id. 
- if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), None).await { + if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id)).await { error!("{err:?}"); continue; }; @@ -486,13 +486,12 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { } #[cfg(not(target_arch = "wasm32"))] -async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, shared_db_id: Option<&str>) -> MmInitResult<()> { - fix_directories(ctx, db_id, shared_db_id)?; +async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>,) -> MmInitResult<()> { + fix_directories(ctx, db_id, None)?; AsyncSqliteConnPool::init(ctx, db_id) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; SqliteConnPool::init(ctx, db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - SqliteConnPool::init_shared(ctx, shared_db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; init_and_migrate_sql_db(ctx, db_id).await?; migrate_db(ctx, db_id)?; @@ -509,7 +508,8 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - run_db_migration_impl(&ctx, None, None).await?; + run_db_migration_impl(&ctx, None).await?; + SqliteConnPool::init_shared(&ctx).map_to_mm(MmInitError::ErrorSqliteInitializing)?; ctx.spawner().spawn(init_db_migration_watcher_loop(ctx.clone())); } @@ -557,8 +557,8 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes let ctx_id = ctx.ffi_handle().map_to_mm(MmInitError::Internal)?; spawn_rpc(ctx_id); - let ctx_c = ctx.clone(); + let ctx_c = ctx.clone(); ctx.spawner().spawn(async move { if let Err(err) = ctx_c.init_metrics() { warn!("Couldn't initialize metrics system: {}", err); diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 6f0586ea29..18ce367371 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -5428,10 +5428,6 @@ pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result) -> Result Date: Mon, 29 Jul 2024 12:31:51 +0100 Subject: [PATCH 177/186] cargo fmt --- mm2src/coins/nft.rs | 120 ++++++++++----------- mm2src/mm2_core/src/sql_connection_pool.rs | 4 +- mm2src/mm2_main/src/lp_native_dex.rs | 2 +- 3 files changed, 61 insertions(+), 65 deletions(-) diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 2bd5a1586d..8b5b8460b3 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -6,8 +6,7 @@ pub(crate) mod nft_errors; pub mod nft_structs; pub(crate) mod storage; -#[cfg(any(test, target_arch = "wasm32"))] -mod nft_tests; +#[cfg(any(test, target_arch = "wasm32"))] mod nft_tests; use crate::{coin_conf, get_my_address, lp_coinfind_or_err, CoinsContext, HDPathAccountToAddressId, MarketCoinOps, MmCoinEnum, MmCoinStruct, MyAddressReq, WithdrawError}; @@ -91,7 +90,7 @@ pub async fn get_nft_list(ctx: MmArc, req: NftListReq) -> MmResult .map_to_mm(GetNftInfoError::Internal)?; let get_nfts = - |id: String, chains: Vec| -> Pin> + Send>> { + |id: String, chains: Vec| -> Pin> + Send>> { let ctx_clone = ctx.clone(); let req = req.clone(); @@ -202,7 +201,7 @@ pub async fn get_nft_transfers( let get_nft_transfers = |db_id: String, chains: Vec| - -> Pin> + Send>> { + -> Pin> + Send>> { let ctx = ctx.clone(); let req = req.clone(); @@ -267,7 +266,7 @@ async fn process_transfers_confirmations( MmCoinEnum::EthCoin(eth_coin) => { let current_block = current_block_impl(eth_coin).await?; Ok((ticker, current_block)) - } + }, _ => MmError::err(TransferConfirmationsError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }), 
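Editor's note on the nft.rs hunks above: get_nft_list and get_nft_transfers build a closure per request that returns a pinned, boxed future, so the same query can be issued once per (account db_id, chains) pair. A simplified sketch of that closure shape, with fetch_for_account standing in for the real per-account storage query and String used in place of the concrete request/chain types:

// Editor's sketch (not part of the patch): a closure returning a pinned boxed future per db_id.
use std::future::Future;
use std::pin::Pin;

type BoxFut<T> = Pin<Box<dyn Future<Output = T> + Send>>;

fn make_query(req: String) -> impl Fn(String, Vec<String>) -> BoxFut<Result<usize, String>> {
    move |db_id: String, chains: Vec<String>| -> BoxFut<Result<usize, String>> {
        let req = req.clone();
        Box::pin(async move { fetch_for_account(&db_id, &chains, &req).await })
    }
}

async fn fetch_for_account(_db_id: &str, chains: &[String], _req: &str) -> Result<usize, String> {
    // Placeholder for opening the per-account NFT storage and running the query.
    Ok(chains.len())
}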
@@ -300,7 +299,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft .map_to_mm(UpdateNftError::Internal)?; let futures = - |db_id: String, chains: Vec| -> Pin> + Send>> { + |db_id: String, chains: Vec| -> Pin> + Send>> { let ctx = ctx.clone(); let req = req.clone(); Box::pin(async move { @@ -327,7 +326,7 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft return MmError::err(UpdateNftError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }); - } + }, }; let my_address = eth_coin.my_address()?; let signed_message = @@ -345,30 +344,29 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft Ok(Some(block)) => block, Ok(None) => { // if there are no rows in NFT LIST table we can try to get nft list from moralis. - let nft_list = - cache_nfts_from_moralis(&ctx, &storage, &wrapper).await?; + let nft_list = cache_nfts_from_moralis(&ctx, &storage, &wrapper).await?; update_meta_in_transfers(&storage, chain, nft_list).await?; update_transfers_with_empty_meta(&storage, &wrapper).await?; update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - } + }, Err(_) => { // if there is an error, then NFT LIST table doesnt exist, so we need to cache nft list from moralis. NftListStorageOps::init(&storage, chain).await?; - let nft_list = - cache_nfts_from_moralis(&ctx, &storage, &wrapper).await?; + let nft_list = cache_nfts_from_moralis(&ctx, &storage, &wrapper).await?; update_meta_in_transfers(&storage, chain, nft_list).await?; update_transfers_with_empty_meta(&storage, &wrapper).await?; update_spam(&storage, *chain, &req.url_antispam).await?; update_phishing(&storage, chain, &req.url_antispam).await?; continue; - } + }, }; - let scanned_block = storage.get_last_scanned_block(chain).await?.ok_or_else(|| + let scanned_block = storage.get_last_scanned_block(chain).await?.ok_or_else(|| { UpdateNftError::LastScannedBlockNotFound { last_nft_block: nft_block.to_string(), - })?; + } + })?; // if both block numbers exist, last scanned block should be equal // or higher than last block number from NFT LIST table. if scanned_block < nft_block { @@ -409,17 +407,17 @@ pub async fn update_nft(ctx: MmArc, req: UpdateNftReq) -> MmResult<(), UpdateNft /// This function uses the up-to-date NFT list for a given chain and updates the /// corresponding global NFT information in the coins context. async fn update_nft_global_in_coins_ctx(ctx: &MmArc, storage: &T, chain: Chain) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let coins_ctx = CoinsContext::from_ctx(ctx).map_to_mm(UpdateNftError::Internal)?; let mut coins = coins_ctx.lock_coins().await; let ticker = chain.to_nft_ticker(); if let Some(MmCoinStruct { - inner: MmCoinEnum::EthCoin(nft_global), - .. - }) = coins.get_mut(ticker) + inner: MmCoinEnum::EthCoin(nft_global), + .. + }) = coins.get_mut(ticker) { let nft_list = storage.get_nft_list(vec![chain], true, 1, None, None).await?; update_nft_infos(nft_global, nft_list.nfts).await; @@ -463,8 +461,8 @@ async fn update_nft_infos(nft_global: &mut EthCoin, nft_list: Vec) { /// `update_spam` function updates spam contracts info in NFT list and NFT transfers. 
async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let token_addresses = storage.get_token_addresses(chain).await?; if !token_addresses.is_empty() { @@ -490,8 +488,8 @@ async fn update_spam(storage: &T, chain: Chain, url_antispam: &Url) -> MmResu } async fn update_phishing(storage: &T, chain: &Chain, url_antispam: &Url) -> MmResult<(), UpdateSpamPhishingError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_domains = storage.get_domains(chain).await?; let nft_domains = storage.get_animation_external_domains(chain).await?; @@ -583,7 +581,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu return MmError::err(UpdateNftError::CoinDoesntSupportNft { coin: coin_enum.ticker().to_owned(), }); - } + }, }; let my_address = eth_coin.my_address()?; let signed_message = @@ -606,7 +604,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu .update_transfer_spam_by_token_address(&req.chain, token_address_str.clone(), true) .await?; return Ok(()); - } + }, }; let mut nft_db = storage .get_nft(&req.chain, token_address_str.clone(), req.token_id.clone()) @@ -626,7 +624,7 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu moralis_meta.common.possible_spam, nft_db.possible_phishing, ) - .await; + .await; // Gather domains for phishing checks let domains = gather_domains(&token_domain, &uri_meta); nft_db.common.collection_name = moralis_meta.common.collection_name; @@ -654,8 +652,8 @@ pub async fn refresh_nft_metadata(ctx: MmArc, req: RefreshMetadataReq) -> MmResu /// The `update_transfer_meta_using_nft` function updates the transfer metadata associated with the given NFT. /// If metadata info contains potential spam links, function sets `possible_spam` true. 
async fn update_transfer_meta_using_nft(storage: &T, chain: &Chain, nft: &mut Nft) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let transfer_meta = TransferMeta::from(nft.clone()); storage @@ -689,8 +687,8 @@ async fn refresh_possible_spam( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let address_hex = eth_addr_to_hex(&nft_db.common.token_address); let spam_res = send_spam_request(chain, url_antispam, address_hex.clone()).await?; @@ -714,8 +712,8 @@ async fn refresh_possible_phishing( nft_db: &mut Nft, url_antispam: &Url, ) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { if !domains.is_empty() { let domain_list = domains.into_iter().collect::>().join(","); @@ -961,7 +959,7 @@ async fn get_fee_details(eth_coin: &EthCoin, transaction_hash: &str) -> Option { let web3_tx = eth_coin .web3() @@ -977,10 +975,10 @@ async fn get_fee_details(eth_coin: &EthCoin, transaction_hash: &str) -> Option None, } } @@ -1145,11 +1143,11 @@ async fn handle_nft_transfer handle_send_erc721(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc721) => { handle_receive_erc721(storage, transfer, wrapper, my_address).await - } + }, (TransferStatus::Send, ContractType::Erc1155) => handle_send_erc1155(storage, chain, transfer).await, (TransferStatus::Receive, ContractType::Erc1155) => { handle_receive_erc1155(storage, transfer, wrapper, my_address).await - } + }, } } @@ -1205,7 +1203,7 @@ async fn handle_receive_erc721 { let mut nft = match get_moralis_metadata(token_address_str.clone(), transfer.token_id.clone(), wrapper) .await @@ -1217,16 +1215,16 @@ async fn handle_receive_erc721 { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? - } + }, }; storage .add_nfts_to_list(*chain, vec![nft.clone()], transfer.block_number) .await?; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; - } + }, } Ok(()) } @@ -1249,19 +1247,19 @@ async fn handle_send_erc1155 { nft_db.common.amount -= transfer.common.amount; storage .update_nft_amount(chain, nft_db.clone(), transfer.block_number) .await?; - } + }, Ordering::Less => { return MmError::err(UpdateNftError::InsufficientAmountInCache { amount_list: nft_db.common.amount.to_string(), amount_history: transfer.common.amount.to_string(), }); - } + }, } Ok(()) } @@ -1290,23 +1288,23 @@ async fn handle_receive_erc1155 { let nft = match get_moralis_metadata(token_address_str.clone(), transfer.token_id.clone(), wrapper).await { Ok(moralis_meta) => { create_nft_from_moralis_metadata(moralis_meta, &transfer, my_address, chain, wrapper.url_antispam) .await? - } + }, Err(_) => { mark_as_spam_and_build_empty_meta(storage, chain, token_address_str, &transfer, my_address).await? 
- } + }, }; storage .add_nfts_to_list(*chain, [nft.clone()], transfer.block_number) .await?; nft - } + }, }; update_transfer_meta_using_nft(storage, chain, &mut nft).await?; Ok(()) @@ -1339,7 +1337,7 @@ async fn create_nft_from_moralis_metadata( moralis_meta.common.possible_spam, moralis_meta.possible_phishing, ) - .await; + .await; let nft = Nft { common: NftCommon { token_address: moralis_meta.common.token_address, @@ -1410,8 +1408,8 @@ async fn cache_nfts_from_moralis(storage: &T, chain: &Chain, nfts: Vec) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { for mut nft in nfts.into_iter() { update_transfer_meta_using_nft(storage, chain, &mut nft).await?; @@ -1421,8 +1419,8 @@ async fn update_meta_in_transfers(storage: &T, chain: &Chain, nfts: Vec) /// `update_transfers_with_empty_meta` function updates empty metadata in transfers. async fn update_transfers_with_empty_meta(storage: &T, wrapper: &UrlSignWrapper<'_>) -> MmResult<(), UpdateNftError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let chain = wrapper.chain; let token_addr_id = storage.get_transfers_with_empty_meta(*chain).await?; @@ -1438,7 +1436,7 @@ async fn update_transfers_with_empty_meta(storage: &T, wrapper: &UrlSignWrapp .update_transfer_spam_by_token_address(chain, addr_id_pair.token_address, true) .await?; continue; - } + }, }; update_transfer_meta_using_nft(storage, chain, &mut nft_meta).await?; } @@ -1475,7 +1473,7 @@ fn process_text_for_spam_link(text: &mut Option, redact: bool) -> Result *text = Some("URL redacted for user protection".to_string()); } Ok(true) - } + }, _ => Ok(false), } } @@ -1554,7 +1552,7 @@ fn process_metadata_field( ); } Ok(true) - } + }, _ => Ok(false), } } @@ -1576,7 +1574,7 @@ async fn build_nft_from_moralis( nft_moralis.common.possible_spam, false, ) - .await; + .await; let token_domain = get_domain_from_url(token_uri.as_deref()); Nft { common: NftCommon { @@ -1659,8 +1657,8 @@ pub async fn clear_nft_db(ctx: MmArc, req: ClearNftDbReq) -> MmResult<(), ClearN } async fn clear_data_for_chain(storage: &T, chain: &Chain) -> MmResult<(), ClearNftDbError> - where - T: NftListStorageOps + NftTransferHistoryStorageOps, +where + T: NftListStorageOps + NftTransferHistoryStorageOps, { let (is_nft_list_init, is_history_init) = ( NftListStorageOps::is_initialized(storage, chain).await?, diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 4f4698dfae..5de3cd83e8 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -40,9 +40,7 @@ impl SqliteConnPool { } /// Initializes a shared database connection. - pub fn init_shared(ctx: &MmCtx) -> Result<(), String> { - Self::init_impl(ctx, None, DbIdConnKind::Shared) - } + pub fn init_shared(ctx: &MmCtx) -> Result<(), String> { Self::init_impl(ctx, None, DbIdConnKind::Shared) } /// Internal implementation to initialize a database connection. 
fn init_impl(ctx: &MmCtx, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 1028cc23ed..18cf7f99a0 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -486,7 +486,7 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { } #[cfg(not(target_arch = "wasm32"))] -async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>,) -> MmInitResult<()> { +async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>) -> MmInitResult<()> { fix_directories(ctx, db_id, None)?; AsyncSqliteConnPool::init(ctx, db_id) .await From 34f9d2762eccae3d96127f7f712307c14b6d932d Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 30 Jul 2024 15:15:38 +0100 Subject: [PATCH 178/186] allow swap continuation with correct db_id on swap restart for swap_v2 --- mm2src/mm2_core/src/sql_connection_pool.rs | 21 +++++++------ mm2src/mm2_main/src/database/my_swaps.rs | 4 ++- mm2src/mm2_main/src/lp_native_dex.rs | 15 ++++++--- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 14 ++++++++- mm2src/mm2_main/src/lp_swap/swap_v2_common.rs | 31 ++++++++++++++++--- mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 14 ++++++++- .../src/storable_state_machine.rs | 6 ++-- 7 files changed, 81 insertions(+), 24 deletions(-) diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 5de3cd83e8..98e780ed5a 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -1,4 +1,5 @@ -use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmArc, MmCtx}; +use crate::mm_ctx::MmCtx; +use crate::mm_ctx::{log_sqlite_file_open_attempt, path_to_dbdir, MmArc}; use async_std::sync::RwLock as AsyncRwLock; use common::log::error; use common::log::info; @@ -35,15 +36,15 @@ pub struct SqliteConnPool { impl SqliteConnPool { /// Initializes a single-user database connection. - pub fn init(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { + pub fn init(ctx: &MmArc, db_id: Option<&str>) -> Result<(), String> { Self::init_impl(ctx, db_id, DbIdConnKind::Single) } /// Initializes a shared database connection. - pub fn init_shared(ctx: &MmCtx) -> Result<(), String> { Self::init_impl(ctx, None, DbIdConnKind::Shared) } + pub fn init_shared(ctx: &MmArc) -> Result<(), String> { Self::init_impl(ctx, None, DbIdConnKind::Shared) } /// Internal implementation to initialize a database connection. - fn init_impl(ctx: &MmCtx, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { + fn init_impl(ctx: &MmArc, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { let db_id = Self::db_id_from_ctx(ctx, db_id, &kind); let sqlite_file_path = match kind { DbIdConnKind::Shared => ctx.shared_dbdir(Some(&db_id)).join(SQLITE_SHARED_DB_ID), @@ -81,13 +82,13 @@ impl SqliteConnPool { } /// Test method for initializing a single-user database connection in-memory. - pub fn init_test(ctx: &MmCtx) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Single) } + pub fn init_test(ctx: &MmArc) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Single) } /// Test method for initializing a shared database connection in-memory. 
- pub fn init_shared_test(ctx: &MmCtx) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Shared) } + pub fn init_shared_test(ctx: &MmArc) -> Result<(), String> { Self::init_impl_test(ctx, None, DbIdConnKind::Shared) } /// Internal test implementation to initialize a database connection in-memory. - fn init_impl_test(ctx: &MmCtx, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { + fn init_impl_test(ctx: &MmArc, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { let db_id = Self::db_id_from_ctx(ctx, db_id, &kind); if let Some(pool) = ctx.sqlite_conn_pool.as_option() { let connection = Arc::new(Mutex::new(Connection::open_in_memory().unwrap())); @@ -171,7 +172,7 @@ impl SqliteConnPool { .unwrap_or_else(|| self.default_db_id.to_owned()), } } - fn db_id_from_ctx(ctx: &MmCtx, db_id: Option<&str>, kind: &DbIdConnKind) -> String { + fn db_id_from_ctx(ctx: &MmArc, db_id: Option<&str>, kind: &DbIdConnKind) -> String { match kind { DbIdConnKind::Shared => db_id .map(|e| e.to_owned()) @@ -197,7 +198,7 @@ pub struct AsyncSqliteConnPool { impl AsyncSqliteConnPool { /// Initialize a database connection. - pub async fn init(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { + pub async fn init(ctx: &MmArc, db_id: Option<&str>) -> Result<(), String> { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { @@ -228,7 +229,7 @@ impl AsyncSqliteConnPool { } /// Initialize a database connection. - pub async fn init_test(ctx: &MmCtx, db_id: Option<&str>) -> Result<(), String> { + pub async fn init_test(ctx: &MmArc, db_id: Option<&str>) -> Result<(), String> { let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index c58eefe7a3..e415ab0250 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -343,7 +343,9 @@ pub const SELECT_MY_SWAP_V2_BY_UUID: &str = r#"SELECT taker_coin_confs, taker_coin_nota, p2p_privkey, - other_p2p_pub + other_p2p_pub, + taker_coin_db_id, + maker_coin_db_id FROM my_swaps WHERE uuid = :uuid; "#; diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 18cf7f99a0..9284f3b134 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -469,7 +469,7 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { } // run db migration for new db_id. - if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id)).await { + if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), false).await { error!("{err:?}"); continue; }; @@ -486,12 +486,19 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { } #[cfg(not(target_arch = "wasm32"))] -async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>) -> MmInitResult<()> { +async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, init_shared: bool) -> MmInitResult<()> { fix_directories(ctx, db_id, None)?; + AsyncSqliteConnPool::init(ctx, db_id) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; SqliteConnPool::init(ctx, db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + + // init shared_db once. 
+ if init_shared { + SqliteConnPool::init_shared(ctx).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + } + init_and_migrate_sql_db(ctx, db_id).await?; migrate_db(ctx, db_id)?; @@ -508,8 +515,7 @@ pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { #[cfg(not(target_arch = "wasm32"))] { - run_db_migration_impl(&ctx, None).await?; - SqliteConnPool::init_shared(&ctx).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + run_db_migration_impl(&ctx, None, true).await?; ctx.spawner().spawn(init_db_migration_watcher_loop(ctx.clone())); } @@ -592,6 +598,7 @@ async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { .await .map_to_mm(MmInitError::OrdersKickStartError)?, ); + let mut lock = ctx .coins_needed_for_kick_start .lock() diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index 8ee09aafa5..db7197d991 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -289,6 +289,10 @@ pub struct MakerSwapDbRepr { pub events: Vec, /// Taker's P2P pubkey pub taker_p2p_pub: Secp256k1PubkeySerialize, + // Taker's coin db_id. + pub taker_coin_db_id: Option, + // Maker's coin db_id. + pub maker_coin_db_id: Option, } impl StateMachineDbRepr for MakerSwapDbRepr { @@ -301,6 +305,10 @@ impl GetSwapCoins for MakerSwapDbRepr { fn maker_coin(&self) -> &str { &self.maker_coin } fn taker_coin(&self) -> &str { &self.taker_coin } + + fn taker_coin_db_id(&self) -> &Option { &self.taker_coin_db_id } + + fn maker_coin_db_id(&self) -> &Option { &self.maker_coin_db_id } } #[cfg(not(target_arch = "wasm32"))] @@ -355,6 +363,8 @@ impl MakerSwapDbRepr { .map_err(|e| SqlError::FromSqlConversionFailure(19, SqlType::Blob, Box::new(e))) })? .into(), + taker_coin_db_id: row.get(20)?, + maker_coin_db_id: row.get(21)?, }) } } @@ -434,7 +444,7 @@ impl; type RecreateError = MmError; - fn to_db_repr(&self) -> MakerSwapDbRepr { + async fn to_db_repr(&self) -> MakerSwapDbRepr { MakerSwapDbRepr { maker_coin: self.maker_coin.ticker().into(), maker_volume: self.maker_volume.clone(), @@ -453,6 +463,8 @@ impl &str; fn taker_coin(&self) -> &str; + + // Represenets the taker's coin db_id(coin's pubkey) used for this swap + fn taker_coin_db_id(&self) -> &Option; + // Represenets the maker's coin db_id(coin's pubkey) used for this swap + fn maker_coin_db_id(&self) -> &Option; } /// Generic function for upgraded swaps kickstart handling. 
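// Editorial sketch (illustrative, not part of the patch): the extracted hunks above lost
// their generic type parameters, so the return types of the new `GetSwapCoins` accessors
// are not visible here. Assuming they are `Option<String>` (consistent with the
// `row.get(20)?` / `row.get(21)?` reads and the TEXT columns added in a later commit),
// the trait additions amount to the following; the kickstart handler in the next hunk
// then refuses to resume a swap until each coin is activated under the account db_id
// that was stored for that swap.

pub trait GetSwapCoins {
    fn maker_coin(&self) -> &str;
    fn taker_coin(&self) -> &str;
    /// Represents the taker's coin db_id (coin's pubkey) used for this swap.
    fn taker_coin_db_id(&self) -> &Option<String>;
    /// Represents the maker's coin db_id (coin's pubkey) used for this swap.
    fn maker_coin_db_id(&self) -> &Option<String>;
}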
@@ -354,10 +359,19 @@ pub(super) async fn swap_kickstart_handler<
     T::RecreateError: std::fmt::Display,
 {
     let taker_coin_ticker = swap_repr.taker_coin();
-
+    let expected_taker_db_id = swap_repr.taker_coin_db_id();
     let taker_coin = loop {
         match lp_coinfind(&ctx, taker_coin_ticker).await {
-            Ok(Some(c)) => break c,
+            Ok(Some(c)) => {
+                if &c.account_db_id().await == expected_taker_db_id {
+                    break c;
+                }
+                info!(
+                    "Can't kickstart taker swap {} until the coin {} is activated with the expected pubkey",
+                    uuid, taker_coin_ticker
+                );
+                Timer::sleep(1.).await;
+            },
             Ok(None) => {
                 info!(
                     "Can't kickstart the swap {} until the coin {} is activated",
@@ -373,10 +387,19 @@ pub(super) async fn swap_kickstart_handler<
     };
 
     let maker_coin_ticker = swap_repr.maker_coin();
-
+    let expected_maker_db_id = swap_repr.maker_coin_db_id();
     let maker_coin = loop {
         match lp_coinfind(&ctx, maker_coin_ticker).await {
-            Ok(Some(c)) => break c,
+            Ok(Some(c)) => {
+                if &c.account_db_id().await == expected_maker_db_id {
+                    break c;
+                }
+                info!(
+                    "Can't kickstart maker swap {} until the coin {} is activated with the expected pubkey",
+                    uuid, maker_coin_ticker
+                );
+                Timer::sleep(1.).await;
+            },
             Ok(None) => {
                 info!(
                     "Can't kickstart the swap {} until the coin {} is activated",
diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs
index 3d6397d43a..7b6a7b9c21 100644
--- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs
+++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs
@@ -321,6 +321,10 @@ pub struct TakerSwapDbRepr {
     pub events: Vec,
     /// Maker's P2P pubkey
     pub maker_p2p_pub: Secp256k1PubkeySerialize,
+    // Taker's coin db_id.
+    pub taker_coin_db_id: Option,
+    // Maker's coin db_id.
+    pub maker_coin_db_id: Option,
 }
 
 #[cfg(not(target_arch = "wasm32"))]
@@ -375,6 +379,8 @@ impl TakerSwapDbRepr {
                     .map_err(|e| SqlError::FromSqlConversionFailure(19, SqlType::Blob, Box::new(e)))
             })?
             .into(),
+            taker_coin_db_id: row.get(20)?,
+            maker_coin_db_id: row.get(21)?,
        })
    }
}
@@ -389,6 +395,10 @@ impl GetSwapCoins for TakerSwapDbRepr {
    fn maker_coin(&self) -> &str { &self.maker_coin }

    fn taker_coin(&self) -> &str { &self.taker_coin }
+
+    fn taker_coin_db_id(&self) -> &Option { &self.taker_coin_db_id }
+
+    fn maker_coin_db_id(&self) -> &Option { &self.maker_coin_db_id }
}

/// Represents the state machine for taker's side of the Trading Protocol Upgrade swap (v2).
@@ -464,7 +474,7 @@ impl;
    type RecreateError = MmError;

-    fn to_db_repr(&self) -> TakerSwapDbRepr {
+    async fn to_db_repr(&self) -> TakerSwapDbRepr {
        TakerSwapDbRepr {
            maker_coin: self.maker_coin.ticker().into(),
            maker_volume: self.maker_volume.clone(),
@@ -483,6 +493,8 @@ impl ::DbRepr;
+    async fn to_db_repr(&self) -> ::DbRepr;

    /// Gets a mutable reference to the storage for the state machine.
    fn storage(&mut self) -> &mut Self::Storage;
@@ -248,7 +248,7 @@ impl StateMachineTrait for T {
        let reentrancy_lock = self.acquire_reentrancy_lock().await?;
        let id = self.id();
        if !self.storage().has_record_for(&id).await?
{ - let repr = self.to_db_repr(); + let repr = self.to_db_repr().await; self.storage().store_repr(id, repr).await?; } self.spawn_reentrancy_lock_renew(reentrancy_lock); @@ -457,7 +457,7 @@ mod tests { type RecreateCtx = (); type RecreateError = Infallible; - fn to_db_repr(&self) -> TestStateMachineRepr { TestStateMachineRepr {} } + async fn to_db_repr(&self) -> TestStateMachineRepr { TestStateMachineRepr {} } fn storage(&mut self) -> &mut Self::Storage { &mut self.storage } From 79f379289303afb5fc624a785c0567a44c3f4a52 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 30 Jul 2024 19:14:17 +0100 Subject: [PATCH 179/186] improve swap_v2 swap kickstart and eth pubkey deriv --- mm2src/coins/eth/v2_activation.rs | 29 +++++++++++++++----- mm2src/mm2_main/src/database.rs | 5 ++++ mm2src/mm2_main/src/database/my_swaps.rs | 13 +++++++-- mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs | 2 ++ mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs | 2 ++ 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 605740e110..d572b0f25e 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -742,10 +742,19 @@ pub(crate) async fn build_address_and_priv_key_policy( #[cfg(not(target_arch = "wasm32"))] { - // Skip the first byte of the uncompressed public key before converting to the eth address. - let pubkey = Public::from_slice(&activated_key.public().as_bytes()[1..]); - let pubkey = public_to_address(&pubkey); - run_db_migration_for_new_pubkey(ctx, display_eth_address(&pubkey)) + let pubkey = { + let p_bytes = activated_key.public().as_bytes(); + if p_bytes.len() == 65 { + // Skip the first byte of the uncompressed public key before converting to the eth address. + let pubkey = Public::from_slice(&p_bytes[1..]); + display_eth_address(&public_to_address(&pubkey)) + } else { + let pubkey = Public::from_slice(&p_bytes); + display_eth_address(&public_to_address(&pubkey)) + } + }; + + run_db_migration_for_new_pubkey(ctx, pubkey) .await .map_to_mm(EthActivationV2Error::InternalError)?; } @@ -1016,9 +1025,15 @@ pub(super) async fn eth_shared_db_id(coin: &EthCoin, ctx: &MmArc) -> Option Option { match coin.derivation_method() { DerivationMethod::HDWallet(hd_wallet) => hd_wallet.get_enabled_address().await.map(|addr| { - // Skip the first byte of the uncompressed public key before converting to the eth address. - let pubkey = Public::from_slice(&addr.pubkey().as_bytes()[1..]); - display_eth_address(&public_to_address(&pubkey)) + let p_key = addr.pubkey(); + if p_key.as_bytes().len() == 65 { + // Skip the first byte of the uncompressed public key before converting to the eth address. 
+ let pubkey = Public::from_slice(&p_key.as_bytes()[1..]); + display_eth_address(&public_to_address(&pubkey)) + } else { + let pubkey = Public::from_slice(&p_key.as_bytes()); + display_eth_address(&public_to_address(&pubkey)) + } }), _ => None, } diff --git a/mm2src/mm2_main/src/database.rs b/mm2src/mm2_main/src/database.rs index 1ce3d73197..b5c5cfc95f 100644 --- a/mm2src/mm2_main/src/database.rs +++ b/mm2src/mm2_main/src/database.rs @@ -131,6 +131,10 @@ fn migration_12() -> Vec<(&'static str, Vec)> { ] } +fn migration_13() -> Vec<(&'static str, Vec)> { + db_common::sqlite::execute_batch(my_swaps::ADD_COIN_DB_ID_FIELD) +} + async fn statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option)>> { match current_migration { 1 => Some(migration_1(ctx).await), @@ -145,6 +149,7 @@ async fn statements_for_migration(ctx: &MmArc, current_migration: i64) -> Option 10 => Some(migration_10(ctx).await), 11 => Some(migration_11()), 12 => Some(migration_12()), + 13 => Some(migration_13()), _ => None, } } diff --git a/mm2src/mm2_main/src/database/my_swaps.rs b/mm2src/mm2_main/src/database/my_swaps.rs index e415ab0250..f7b7287924 100644 --- a/mm2src/mm2_main/src/database/my_swaps.rs +++ b/mm2src/mm2_main/src/database/my_swaps.rs @@ -56,6 +56,11 @@ pub const ADD_OTHER_P2P_PUBKEY_FIELD: &str = "ALTER TABLE my_swaps ADD COLUMN ot // Storing rational numbers as text to maintain precision pub const ADD_DEX_FEE_BURN_FIELD: &str = "ALTER TABLE my_swaps ADD COLUMN dex_fee_burn TEXT;"; +pub const ADD_COIN_DB_ID_FIELD: &[&str] = &[ + "ALTER TABLE my_swaps ADD COLUMN taker_coin_db_id TEXT;", + "ALTER TABLE my_swaps ADD COLUMN maker_coin_db_id TEXT;", +]; + /// The query to insert swap on migration 1, during this migration swap_type column doesn't exist /// in my_swaps table yet. 
const INSERT_MY_SWAP_MIGRATION_1: &str = @@ -96,7 +101,9 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( maker_coin_nota, taker_coin_confs, taker_coin_nota, - other_p2p_pub + other_p2p_pub, + taker_coin_db_id, + maker_coin_db_id ) VALUES ( :my_coin, :other_coin, @@ -117,7 +124,9 @@ const INSERT_MY_SWAP_V2: &str = r#"INSERT INTO my_swaps ( :maker_coin_nota, :taker_coin_confs, :taker_coin_nota, - :other_p2p_pub + :other_p2p_pub, + :taker_coin_db_id, + :maker_coin_db_id );"#; pub fn insert_new_swap_v2(conn: &Connection, params: &[(&str, &dyn ToSql)]) -> SqlResult<()> { diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index db7197d991..24f795c0c7 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -186,6 +186,8 @@ impl StateMachineStorage for MakerSwapStorage { ":taker_coin_confs": repr.conf_settings.taker_coin_confs, ":taker_coin_nota": repr.conf_settings.taker_coin_nota, ":other_p2p_pub": repr.taker_p2p_pub.to_bytes(), + ":taker_coin_db_id": repr.taker_coin_db_id, + ":maker_coin_db_id": repr.maker_coin_db_id, }; insert_new_swap_v2(&conn, sql_params) })?; diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 7b6a7b9c21..570f9f118e 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -218,6 +218,8 @@ impl StateMachineStorage for TakerSwapStorage { ":taker_coin_confs": repr.conf_settings.taker_coin_confs, ":taker_coin_nota": repr.conf_settings.taker_coin_nota, ":other_p2p_pub": repr.maker_p2p_pub.to_bytes(), + ":taker_coin_db_id": repr.taker_coin_db_id, + ":maker_coin_db_id": repr.maker_coin_db_id, }; insert_new_swap_v2(&conn, sql_params) })?; From 117aa4402f6f8237ef95b56818b3f41e0c4ed987 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Thu, 1 Aug 2024 01:29:17 +0100 Subject: [PATCH 180/186] Fix: Improve db_id validation for taker order processing --- mm2src/coins/eth/v2_activation.rs | 4 +- mm2src/mm2_main/src/database/my_orders.rs | 6 +- mm2src/mm2_main/src/lp_native_dex.rs | 18 ++- mm2src/mm2_main/src/lp_ordermatch.rs | 127 ++++++++++++++---- .../src/lp_ordermatch/my_orders_storage.rs | 40 +++--- mm2src/mm2_main/src/lp_swap.rs | 18 ++- mm2src/mm2_main/src/lp_swap/maker_swap.rs | 12 +- .../src/lp_swap/recreate_swap_data.rs | 8 +- mm2src/mm2_main/src/lp_swap/taker_swap.rs | 12 +- mm2src/mm2_main/src/ordermatch_tests.rs | 87 ++++++++---- 10 files changed, 227 insertions(+), 105 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index d572b0f25e..0b045591de 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -749,7 +749,7 @@ pub(crate) async fn build_address_and_priv_key_policy( let pubkey = Public::from_slice(&p_bytes[1..]); display_eth_address(&public_to_address(&pubkey)) } else { - let pubkey = Public::from_slice(&p_bytes); + let pubkey = Public::from_slice(p_bytes); display_eth_address(&public_to_address(&pubkey)) } }; @@ -1031,7 +1031,7 @@ pub(super) async fn eth_account_db_id(coin: &EthCoin) -> Option { let pubkey = Public::from_slice(&p_key.as_bytes()[1..]); display_eth_address(&public_to_address(&pubkey)) } else { - let pubkey = Public::from_slice(&p_key.as_bytes()); + let pubkey = Public::from_slice(p_key.as_bytes()); display_eth_address(&public_to_address(&pubkey)) } }), diff --git a/mm2src/mm2_main/src/database/my_orders.rs 
b/mm2src/mm2_main/src/database/my_orders.rs index f2441a1eb5..09d7c8bf25 100644 --- a/mm2src/mm2_main/src/database/my_orders.rs +++ b/mm2src/mm2_main/src/database/my_orders.rs @@ -56,7 +56,7 @@ pub fn insert_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlRes 0.to_string(), "Created".to_string(), ]; - ctx.run_sql_query(order.db_id().as_deref(), move |conn| { + ctx.run_sql_query(order.account_id().as_deref(), move |conn| { conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) }) @@ -83,7 +83,7 @@ pub fn insert_taker_order(ctx: &MmArc, uuid: Uuid, order: &TakerOrder) -> SqlRes "Created".to_string(), ]; - ctx.run_sql_query(order.db_id().as_deref(), move |conn| { + ctx.run_sql_query(order.account_id().as_deref(), move |conn| { conn.execute(INSERT_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) }) @@ -98,7 +98,7 @@ pub fn update_maker_order(ctx: &MmArc, uuid: Uuid, order: &MakerOrder) -> SqlRes order.updated_at.unwrap_or(0).to_string(), "Updated".to_string(), ]; - ctx.run_sql_query(order.db_id().as_deref(), move |conn| { + ctx.run_sql_query(order.account_id().as_deref(), move |conn| { conn.execute(UPDATE_MY_ORDER, params_from_iter(params.iter())) .map(|_| ()) }) diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index 9284f3b134..cd88566c8c 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -590,14 +590,18 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes } async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { - let mut coins_needed_for_kick_start = swap_kick_starts(ctx.clone(), db_id) - .await - .map_to_mm(MmInitError::SwapsKickStartError)?; - coins_needed_for_kick_start.extend( - orders_kick_start(&ctx, db_id) + let coins_needed_for_kick_start = { + let mut coins_needed_for_kick_start = swap_kick_starts(ctx.clone(), db_id) .await - .map_to_mm(MmInitError::OrdersKickStartError)?, - ); + .map_to_mm(MmInitError::SwapsKickStartError)?; + coins_needed_for_kick_start.extend( + orders_kick_start(&ctx, db_id) + .await + .map_to_mm(MmInitError::OrdersKickStartError)?, + ); + + coins_needed_for_kick_start + }; let mut lock = ctx .coins_needed_for_kick_start diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 18ce367371..d0a0e86495 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -25,8 +25,8 @@ use best_orders::BestOrdersAction; use blake2::digest::{Update, VariableOutput}; use blake2::Blake2bVar; use coins::utxo::{compressed_pub_key_from_priv_raw, ChecksumType, UtxoAddressFormat}; -use coins::{coin_conf, find_pair, find_unique_account_ids_active, lp_coinfind, BalanceTradeFeeUpdatedHandler, - CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum}; +use coins::{coin_conf, find_pair, find_unique_account_ids_active, lp_coinfind, lp_coinfind_any, + BalanceTradeFeeUpdatedHandler, CoinProtocol, CoinsContext, FeeApproxStage, MarketCoinOps, MmCoinEnum}; use common::executor::{simple_map::AbortableSimpleMap, AbortSettings, AbortableSystem, AbortedError, SpawnAbortable, SpawnFuture, Timer}; use common::log::{error, info, warn, LogOnError}; @@ -1517,7 +1517,8 @@ impl<'a> TakerOrderBuilder<'a> { base_orderbook_ticker: self.base_orderbook_ticker, rel_orderbook_ticker: self.rel_orderbook_ticker, p2p_privkey, - db_id: self.base_coin.account_db_id().await, + base_coin_account_id: self.base_coin.account_db_id().await, + 
rel_coin_account_id: self.rel_coin.account_db_id().await, }) } @@ -1558,7 +1559,8 @@ impl<'a> TakerOrderBuilder<'a> { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: self.base_coin.account_db_id().await, + base_coin_account_id: self.base_coin.account_db_id().await, + rel_coin_account_id: self.rel_coin.account_db_id().await, } } } @@ -1580,7 +1582,8 @@ pub struct TakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, - db_id: Option, + base_coin_account_id: Option, + rel_coin_account_id: Option, } /// Result of match_reserved function @@ -1682,7 +1685,12 @@ impl TakerOrder { fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } - pub fn db_id(&self) -> Option { self.db_id.clone() } + pub fn account_id(&self) -> &Option { + match self.request.action { + TakerAction::Buy => &self.rel_coin_account_id, + TakerAction::Sell => &self.base_coin_account_id, + } + } } #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -1714,7 +1722,8 @@ pub struct MakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, - db_id: Option, + base_coin_account_id: Option, + rel_coin_account_id: Option, } pub struct MakerOrderBuilder<'a> { @@ -1970,7 +1979,8 @@ impl<'a> MakerOrderBuilder<'a> { base_orderbook_ticker: self.base_orderbook_ticker, rel_orderbook_ticker: self.rel_orderbook_ticker, p2p_privkey, - db_id: self.base_coin.account_db_id().await, + base_coin_account_id: self.base_coin.account_db_id().await, + rel_coin_account_id: self.rel_coin.account_db_id().await, }) } @@ -1995,7 +2005,8 @@ impl<'a> MakerOrderBuilder<'a> { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: self.base_coin.account_db_id().await, + base_coin_account_id: self.base_coin.account_db_id().await, + rel_coin_account_id: self.rel_coin.account_db_id().await, } } } @@ -2104,7 +2115,7 @@ impl MakerOrder { fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) } - pub fn db_id(&self) -> Option { self.db_id.clone() } + pub fn account_id(&self) -> &Option { &self.base_coin_account_id } } impl From for MakerOrder { @@ -2128,7 +2139,8 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.base_orderbook_ticker, rel_orderbook_ticker: taker_order.rel_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, - db_id: taker_order.db_id, + base_coin_account_id: taker_order.base_coin_account_id, + rel_coin_account_id: taker_order.rel_coin_account_id, }, // The "buy" taker order is recreated with reversed pair as Maker order is always considered as "sell" TakerAction::Buy => { @@ -2151,7 +2163,8 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.rel_orderbook_ticker, rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, - db_id: taker_order.db_id, + base_coin_account_id: taker_order.base_coin_account_id, + rel_coin_account_id: taker_order.rel_coin_account_id, } }, } @@ -3417,7 +3430,7 @@ async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchCo .error_log_with_msg("!save_new_active_maker_order"); if maker_order.save_in_history { storage - .update_was_taker_in_filtering_history(uuid, maker_order.db_id().as_deref()) + 
.update_was_taker_in_filtering_history(uuid, maker_order.account_id().as_deref()) .await .error_log_with_msg("!update_was_taker_in_filtering_history"); } @@ -5039,10 +5052,10 @@ impl Order { } } - pub fn db_id(&self) -> Option { + pub fn account_id(&self) -> &Option { match self { - Order::Maker(maker) => maker.db_id(), - Order::Taker(taker) => taker.db_id(), + Order::Maker(maker) => maker.account_id(), + Order::Taker(taker) => taker.account_id(), } } } @@ -5418,6 +5431,14 @@ pub struct HistoricalOrder { conf_settings: Option, } +/// Initializes and restarts the order matching system by loading saved orders from storage +/// and populating the order contexts. +/// +/// # Notes +/// - For maker orders, only those with matching `rel_coin_account_id` are processed. +/// - For taker orders, only those with matching `base_coin_account_id` are processed. +/// - This ensures account consistency for both maker and taker orders. +/// - Invalid orders are silently skipped and not added to the order contexts. pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result, String> { let ordermatch_ctx = try_s!(OrdermatchContext::from_ctx(ctx)); let storage = MyOrdersStorage::new(ctx.clone()); @@ -5426,19 +5447,77 @@ pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result { + let rel_coin_db_id = rel_coin.inner.account_db_id().await.unwrap_or(ctx.default_db_id()); + if rel_coin_db_id == order_rel_coin_db_id { + coins.insert(order.base.clone()); + coins.insert(order.rel.clone()); + let mut maker_orders_ctx = ordermatch_ctx.maker_orders_ctx.lock(); + maker_orders_ctx.add_order(ctx.weak(), order.clone(), None); + + break; + } + + info!( + "Failed to add maker order {:?} to kick_start queue. Coin account id mismatch: got=([{rel_coin_db_id}]), expected=([{order_rel_coin_db_id}])", + order.uuid + ); + Timer::sleep(5.).await; + continue; + }, + None => { + info!( + "Failed to add maker order {:?} to kick_start queue, rel coin:{} with account_id:{order_rel_coin_db_id} needs to be activated!", + order.uuid, + order.rel + ); + Timer::sleep(5.).await; + continue; + }, + } + } } } - let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; for order in saved_taker_orders { - coins.insert(order.request.base.clone()); - coins.insert(order.request.rel.clone()); - taker_orders.insert(order.request.uuid, order); + let order_base_coin_account_id = order.base_coin_account_id.clone().unwrap_or(ctx.default_db_id()); + + loop { + match try_s!(lp_coinfind_any(ctx, &order.request.base).await) { + Some(base_coin) => { + let base_coin_account_id = base_coin.inner.account_db_id().await.unwrap_or(ctx.default_db_id()); + if base_coin_account_id == order_base_coin_account_id { + coins.insert(order.request.base.clone()); + coins.insert(order.request.rel.clone()); + let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; + taker_orders.insert(order.request.uuid, order); + + break; + }; + + info!( + "Failed to add taker order {} to kick_start queue. {} Coin account id mismatch: got=([{base_coin_account_id}]), expected=([{order_base_coin_account_id}])", + order.request.uuid, + order.request.base + ); + }, + None => { + info!( + "Failed to add taker order {} to kick_start queue. 
Base coin:{} with account_id:{order_base_coin_account_id} needs to be activated!", + order.request.uuid, + order.request.base + ); + + Timer::sleep(5.).await; + continue; + }, + } + } } Ok(coins) diff --git a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs index c3adbe4295..46a84c9dea 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/my_orders_storage.rs @@ -76,7 +76,7 @@ pub fn delete_my_taker_order(ctx: MmArc, order: TakerOrder, reason: TakerOrderCa let fut = async move { let uuid = order.request.uuid; let save_in_history = order.save_in_history; - let db_id = order.db_id(); + let db_id = order.account_id().clone(); let storage = MyOrdersStorage::new(ctx); storage @@ -117,14 +117,14 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa let storage = MyOrdersStorage::new(ctx); if order_to_save.was_updated() { if let Ok(order_from_file) = storage - .load_active_maker_order(order_to_save.uuid, order_to_save.db_id().as_deref()) + .load_active_maker_order(order_to_save.uuid, order_to_save.account_id().as_deref()) .await { order_to_save = order_from_file; } } storage - .delete_active_maker_order(uuid, order_to_save.db_id().as_deref()) + .delete_active_maker_order(uuid, order_to_save.account_id().as_deref()) .await .error_log_with_msg("!delete_active_maker_order"); @@ -134,7 +134,11 @@ pub fn delete_my_maker_order(ctx: MmArc, order: MakerOrder, reason: MakerOrderCa .await .error_log_with_msg("!save_order_in_history"); storage - .update_order_status_in_filtering_history(uuid, reason.to_string(), order_to_save.db_id().as_deref()) + .update_order_status_in_filtering_history( + uuid, + reason.to_string(), + order_to_save.account_id().as_deref(), + ) .await .error_log_with_msg("!update_order_status_in_filtering_history"); } @@ -266,13 +270,13 @@ mod native_impl { } async fn save_new_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let path = my_maker_order_file_path(&self.ctx, &order.uuid, order.db_id().as_deref()); + let path = my_maker_order_file_path(&self.ctx, &order.uuid, order.account_id().as_deref()); write_json(order, &path, USE_TMP_FILE).await?; Ok(()) } async fn save_new_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let path = my_taker_order_file_path(&self.ctx, &order.request.uuid, order.db_id().as_deref()); + let path = my_taker_order_file_path(&self.ctx, &order.request.uuid, order.account_id().as_deref()); write_json(order, &path, USE_TMP_FILE).await?; Ok(()) } @@ -305,7 +309,7 @@ mod native_impl { #[async_trait] impl MyOrdersHistory for MyOrdersStorage { async fn save_order_in_history(&self, order: &Order) -> MyOrdersResult<()> { - let path = my_order_history_file_path(&self.ctx, &order.uuid(), order.db_id().as_deref()); + let path = my_order_history_file_path(&self.ctx, &order.uuid(), order.account_id().as_deref()); write_json(order, &path, USE_TMP_FILE).await?; Ok(()) } @@ -458,7 +462,7 @@ mod wasm_impl { } async fn save_new_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -471,7 +475,7 @@ mod wasm_impl { } async fn save_new_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let db = 
self.ctx.ordermatch_db(order.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -500,7 +504,7 @@ mod wasm_impl { } async fn update_active_maker_order(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -513,7 +517,7 @@ mod wasm_impl { } async fn update_active_taker_order(&self, order: &TakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -531,7 +535,7 @@ mod wasm_impl { #[async_trait] impl MyOrdersHistory for MyOrdersStorage { async fn save_order_in_history(&self, order: &Order) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(order.db_id().as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; @@ -585,7 +589,7 @@ mod wasm_impl { async fn save_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { let item = maker_order_to_filtering_history_item(order, "Created".to_owned(), false)?; - let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.add_item(&item).await?; @@ -595,7 +599,7 @@ mod wasm_impl { async fn save_taker_order_in_filtering_history(&self, order: &TakerOrder) -> MyOrdersResult<()> { let item = taker_order_to_filtering_history_item(order, "Created".to_owned())?; - let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; table.add_item(&item).await?; @@ -603,7 +607,7 @@ mod wasm_impl { } async fn update_maker_order_in_filtering_history(&self, order: &MakerOrder) -> MyOrdersResult<()> { - let db = self.ctx.ordermatch_db(order.db_id.as_deref()).await?; + let db = self.ctx.ordermatch_db(order.account_id().as_deref()).await?; let transaction = db.transaction().await?; let table = transaction.table::().await?; // get the previous item to see if the order was taker @@ -752,7 +756,8 @@ mod tests { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, } } @@ -781,7 +786,8 @@ mod tests { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, } } diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index c1467c8674..b951871a64 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1538,21 +1538,21 @@ pub async fn swap_kick_starts(ctx: MmArc, db_id: Option<&str>) -> Result (swap.maker_coin_db_id.as_deref(), swap.taker_coin_db_id.as_deref()), - SavedSwap::Taker(swap) => (swap.maker_coin_db_id.as_deref(), swap.taker_coin_db_id.as_deref()), + let taker_coin_account_id = match &swap { + 
SavedSwap::Maker(swap) => swap.taker_coin_account_id.as_deref(), + SavedSwap::Taker(swap) => swap.taker_coin_account_id.as_deref(), }; let taker_coin = loop { match lp_coinfind(&ctx, &taker_coin_ticker).await { Ok(Some(c)) => { - if taker_coin_db_id == c.account_db_id().await.as_deref() { + if taker_coin_account_id == c.account_db_id().await.as_deref() { break c; }; info!( "Can't kickstart the swap {} until the coin {} is activated with pubkey: {}", swap.uuid(), taker_coin_ticker, - taker_coin_db_id.unwrap_or(&ctx.rmd160.to_string()) + taker_coin_account_id.unwrap_or(&ctx.rmd160.to_string()) ); Timer::sleep(5.).await; }, @@ -1571,17 +1571,21 @@ async fn kickstart_thread_handler(ctx: MmArc, swap: SavedSwap, maker_coin_ticker }; }; + let maker_coin_account_id = match &swap { + SavedSwap::Maker(swap) => swap.maker_coin_account_id.as_deref(), + SavedSwap::Taker(swap) => swap.maker_coin_account_id.as_deref(), + }; let maker_coin = loop { match lp_coinfind(&ctx, &maker_coin_ticker).await { Ok(Some(c)) => { - if maker_coin_db_id == c.account_db_id().await.as_deref() { + if maker_coin_account_id == c.account_db_id().await.as_deref() { break c; }; info!( "Can't kickstart the swap {} until the coin {} is activated with pubkey: {}", swap.uuid(), maker_coin_ticker, - maker_coin_db_id.unwrap_or(&ctx.rmd160.to_string()) + maker_coin_account_id.unwrap_or(&ctx.rmd160.to_string()) ); Timer::sleep(5.).await; }, diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index 771c53ad40..6e5e60e40c 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -108,8 +108,8 @@ async fn save_my_maker_swap_event( events: vec![], success_events: MAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: MAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), - maker_coin_db_id: swap.maker_coin.account_db_id().await, - taker_coin_db_id: swap.taker_coin.account_db_id().await, + maker_coin_account_id: swap.maker_coin.account_db_id().await, + taker_coin_account_id: swap.taker_coin.account_db_id().await, }), Err(e) => return ERR!("{}", e), }; @@ -1825,9 +1825,9 @@ pub struct MakerSavedSwap { pub success_events: Vec, pub error_events: Vec, /// needed to validate if pending maker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub maker_coin_db_id: Option, + pub maker_coin_account_id: Option, /// needed to validate if pending taker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub taker_coin_db_id: Option, + pub taker_coin_account_id: Option, } #[cfg(test)] @@ -1885,8 +1885,8 @@ impl MakerSavedSwap { mm_version: None, success_events: vec![], error_events: vec![], - maker_coin_db_id: None, - taker_coin_db_id: None, + maker_coin_account_id: None, + taker_coin_account_id: None, } } } diff --git a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs index 8635d7a34c..97a03e5618 100644 --- a/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs +++ b/mm2src/mm2_main/src/lp_swap/recreate_swap_data.rs @@ -94,8 +94,8 @@ fn recreate_maker_swap(ctx: MmArc, taker_swap: TakerSavedSwap) -> RecreateSwapRe mm_version: Some(ctx.mm_version.clone()), success_events: MAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: MAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), - taker_coin_db_id: taker_swap.taker_coin_db_id, - maker_coin_db_id: taker_swap.maker_coin_db_id, + 
taker_coin_account_id: taker_swap.taker_coin_account_id, + maker_coin_account_id: taker_swap.maker_coin_account_id, }; let mut event_it = taker_swap.events.into_iter(); @@ -298,8 +298,8 @@ async fn recreate_taker_swap(ctx: MmArc, maker_swap: MakerSavedSwap) -> Recreate mm_version: Some(ctx.mm_version.clone()), success_events: TAKER_SUCCESS_EVENTS.iter().map(|event| event.to_string()).collect(), error_events: TAKER_ERROR_EVENTS.iter().map(|event| event.to_string()).collect(), - taker_coin_db_id: maker_swap.taker_coin_db_id, - maker_coin_db_id: maker_swap.maker_coin_db_id, + taker_coin_account_id: maker_swap.taker_coin_account_id, + maker_coin_account_id: maker_swap.maker_coin_account_id, }; let mut event_it = maker_swap.events.into_iter(); diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index a2f249a26f..4c565e0558 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -141,8 +141,8 @@ async fn save_my_taker_swap_event( TAKER_SUCCESS_EVENTS.iter().map(<&str>::to_string).collect() }, error_events: TAKER_ERROR_EVENTS.iter().map(<&str>::to_string).collect(), - taker_coin_db_id: swap.taker_coin.account_db_id().await, - maker_coin_db_id: swap.maker_coin.account_db_id().await, + taker_coin_account_id: swap.taker_coin.account_db_id().await, + maker_coin_account_id: swap.maker_coin.account_db_id().await, }), Err(e) => return ERR!("{}", e), }; @@ -220,10 +220,10 @@ pub struct TakerSavedSwap { pub mm_version: Option, pub success_events: Vec, pub error_events: Vec, - /// needed to validate if pending maker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub maker_coin_db_id: Option, - /// needed to validate if pending taker coin is activated with the correct `db_id` in `kickstart_thread_handler` - pub taker_coin_db_id: Option, + /// needed to validate if pending maker coin is activated with the correct `account_id` in `kickstart_thread_handler` + pub maker_coin_account_id: Option, + /// needed to validate if pending taker coin is activated with the correct `account_id` in `kickstart_thread_handler` + pub taker_coin_account_id: Option, } impl TakerSavedSwap { diff --git a/mm2src/mm2_main/src/ordermatch_tests.rs b/mm2src/mm2_main/src/ordermatch_tests.rs index e28c978600..7400461e32 100644 --- a/mm2src/mm2_main/src/ordermatch_tests.rs +++ b/mm2src/mm2_main/src/ordermatch_tests.rs @@ -35,7 +35,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { @@ -74,7 +75,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { @@ -113,7 +115,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { @@ -152,7 +155,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { @@ -191,7 +195,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: 
None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { @@ -230,7 +235,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { @@ -271,7 +277,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { base: "KMD".to_owned(), @@ -312,7 +319,8 @@ fn test_match_maker_order_and_taker_request() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let request = TakerRequest { base: "REL".to_owned(), @@ -390,7 +398,8 @@ fn test_maker_order_available_amount() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; maker.matches.insert(new_uuid(), MakerMatch { request: TakerRequest { @@ -492,7 +501,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -537,7 +547,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -582,7 +593,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -627,7 +639,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -672,7 +685,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -717,7 +731,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -762,7 +777,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -807,7 +823,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -852,7 +869,8 @@ fn test_taker_match_reserved() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -900,7 +918,8 @@ fn test_taker_order_cancellable() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; 
assert!(order.is_cancellable()); @@ -931,7 +950,8 @@ fn test_taker_order_cancellable() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; order.matches.insert(new_uuid(), TakerMatch { @@ -989,7 +1009,8 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }, None, ); @@ -1012,7 +1033,8 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }, None, ); @@ -1035,7 +1057,8 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }, None, ); @@ -1063,7 +1086,8 @@ fn prepare_for_cancel_by(ctx: &MmArc) -> mpsc::Receiver { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }); rx } @@ -1159,7 +1183,8 @@ fn test_taker_order_match_by() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let reserved = MakerReserved { @@ -1214,7 +1239,8 @@ fn test_maker_order_was_updated() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let mut update_msg = MakerOrderUpdated::new(maker_order.uuid); update_msg.with_new_price(BigRational::from_integer(2.into())); @@ -3240,7 +3266,8 @@ fn test_maker_order_balance_loops() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; let morty_order = MakerOrder { @@ -3260,7 +3287,8 @@ fn test_maker_order_balance_loops() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; assert!(!maker_orders_ctx.balance_loop_exists(rick_ticker)); @@ -3293,7 +3321,8 @@ fn test_maker_order_balance_loops() { base_orderbook_ticker: None, rel_orderbook_ticker: None, p2p_privkey: None, - db_id: None, + base_coin_account_id: None, + rel_coin_account_id: None, }; maker_orders_ctx.add_order(ctx.weak(), rick_order_2.clone(), None); From 2f3086c6ec15bfef9e4907ff6cc09bd3d1497627 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Fri, 2 Aug 2024 04:34:07 +0100 Subject: [PATCH 181/186] cleanups --- mm2src/coins/eth/v2_activation.rs | 20 ++--- .../coins/hd_wallet/storage/wasm_storage.rs | 6 +- mm2src/mm2_db/src/indexed_db/db_lock.rs | 78 ++++++++++--------- .../src/account/storage/wasm_storage.rs | 2 +- 4 files changed, 56 insertions(+), 50 deletions(-) diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 0b045591de..335d96a27f 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -729,15 +729,17 @@ pub(crate) async fn build_address_and_priv_key_policy( // Consider storing `derivation_path` at `EthCoinImpl`. 
let path_to_coin = json::from_value(conf["derivation_path"].clone()) .map_to_mm(|e| EthActivationV2Error::ErrorDeserializingDerivationPath(e.to_string()))?; - let raw_priv_key = global_hd_ctx - .derive_secp256k1_secret( - &path_to_address - .to_derivation_path(&path_to_coin) - .mm_err(|e| EthActivationV2Error::InvalidPathToAddress(e.to_string()))?, - ) - .mm_err(|e| EthActivationV2Error::InternalError(e.to_string()))?; - let activated_key = KeyPair::from_secret_slice(raw_priv_key.as_slice()) - .map_to_mm(|e| EthActivationV2Error::InternalError(e.to_string()))?; + let activated_key = { + let raw_priv_key = global_hd_ctx + .derive_secp256k1_secret( + &path_to_address + .to_derivation_path(&path_to_coin) + .mm_err(|e| EthActivationV2Error::InvalidPathToAddress(e.to_string()))?, + ) + .mm_err(|e| EthActivationV2Error::InternalError(e.to_string()))?; + KeyPair::from_secret_slice(raw_priv_key.as_slice()) + .map_to_mm(|e| EthActivationV2Error::InternalError(e.to_string()))? + }; let bip39_secp_priv_key = global_hd_ctx.root_priv_key().clone(); #[cfg(not(target_arch = "wasm32"))] diff --git a/mm2src/coins/hd_wallet/storage/wasm_storage.rs b/mm2src/coins/hd_wallet/storage/wasm_storage.rs index 07672c975f..f87fcb05a4 100644 --- a/mm2src/coins/hd_wallet/storage/wasm_storage.rs +++ b/mm2src/coins/hd_wallet/storage/wasm_storage.rs @@ -271,9 +271,7 @@ impl HDWalletIndexedDbStorage { } async fn lock_db_mutex(db: &SharedDb) -> HDWalletStorageResult { - db.get_or_initialize_shared(None) - .await - .mm_err(HDWalletStorageError::from) + db.get_or_initialize_shared().await.mm_err(HDWalletStorageError::from) } async fn find_account( @@ -319,7 +317,7 @@ impl HDWalletIndexedDbStorage { #[cfg(any(test, target_arch = "wasm32"))] pub(super) async fn get_all_storage_items(ctx: &MmArc) -> Vec { let coins_ctx = CoinsContext::from_ctx(ctx).unwrap(); - let db = coins_ctx.hd_wallet_db.get_or_initialize_shared(None).await.unwrap(); + let db = coins_ctx.hd_wallet_db.get_or_initialize_shared().await.unwrap(); let transaction = db.inner.transaction().await.unwrap(); let table = transaction.table::().await.unwrap(); table diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index bd79f3fc38..3bfb4a973f 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -1,5 +1,6 @@ use super::{DbIdentifier, DbInstance, InitDbResult}; use mm2_core::{mm_ctx::MmArc, DbNamespaceId}; +use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::{Arc, Weak}; use tokio::sync::{Mutex as AsyncMutex, OwnedMappedMutexGuard, OwnedMutexGuard, RwLock}; @@ -11,13 +12,14 @@ const GLOBAL_DB_ID: &str = "KOMODEFI"; pub type DbLocked = OwnedMappedMutexGuard, Db>; pub type SharedDb = Arc>; pub type WeakDb = Weak>; +type ConnectionsDb = Arc>>>>>; #[allow(clippy::type_complexity)] pub struct ConstructibleDb { /// It's better to use something like [`Constructible`], but it doesn't provide a method to get the inner value by the mutable reference. - locks: Arc>>>>>, + locks: ConnectionsDb, db_namespace: DbNamespaceId, - // Default mm2 d_id derive from passphrase rmd160 + // Default mm2 db_id derive from passphrase rmd160 db_id: String, // Default mm2 shared_db_id derive from passphrase shared_db_id: String, @@ -30,10 +32,9 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. 
pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { let default_db_id = ctx.rmd160().to_string(); - let shared_db_id = ctx.default_shared_db_id().to_string(); - let db_id = db_id.unwrap_or(&default_db_id).to_string(); - let conns = HashMap::from([(db_id.to_owned(), Arc::new(AsyncMutex::new(None)))]); + let shared_db_id = ctx.default_shared_db_id().to_string(); + let conns = HashMap::from([(db_id.to_owned(), Default::default())]); ConstructibleDb { locks: Arc::new(RwLock::new(conns)), @@ -47,9 +48,10 @@ impl ConstructibleDb { /// derived from the same passphrase. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc) -> Self { - let db_id = hex::encode(ctx.rmd160().as_slice()); + let db_id = ctx.default_db_id(); let shared_db_id = ctx.default_shared_db_id().to_string(); - let conns = HashMap::from([(shared_db_id.clone(), Arc::new(AsyncMutex::new(None)))]); + let conns = HashMap::from([(shared_db_id.clone(), Default::default())]); + ConstructibleDb { locks: Arc::new(RwLock::new(conns)), db_namespace: ctx.db_namespace, @@ -61,7 +63,7 @@ impl ConstructibleDb { /// Creates a new uninitialized `Db` instance shared between all wallets/seed. /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_global_db(ctx: &MmArc) -> Self { - let db_id = ctx.rmd160().to_string(); + let db_id = ctx.default_db_id(); let shared_db_id = ctx.default_shared_db_id().to_string(); ConstructibleDb { locks: Arc::new(RwLock::new(HashMap::default())), @@ -77,8 +79,8 @@ impl ConstructibleDb { } // handle to get or initialize shared db - pub async fn get_or_initialize_shared(&self, db_id: Option<&str>) -> InitDbResult> { - self.get_or_initialize_impl(db_id, true).await + pub async fn get_or_initialize_shared(&self) -> InitDbResult> { + self.get_or_initialize_impl(Some(&self.shared_db_id), true).await } // handle to get or initialize global db @@ -89,35 +91,39 @@ impl ConstructibleDb { /// Locks the given mutex and checks if the inner database is initialized already or not, /// initializes it if it's required, and returns the locked instance. async fn get_or_initialize_impl(&self, db_id: Option<&str>, is_shared: bool) -> InitDbResult> { - let db_id = { - let default_id = if is_shared { &self.shared_db_id } else { &self.db_id }; - db_id.unwrap_or(default_id).to_owned() - }; + let db_id = db_id + .unwrap_or(if is_shared { &self.shared_db_id } else { &self.db_id }) + .to_owned(); let mut connections = self.locks.write().await; - if let Some(connection) = connections.get_mut(&db_id) { - let mut locked_db = connection.clone().lock_owned().await; - // Drop connections lock as soon as possible. - drop(connections); - // check and return found connection if already initialized. - if locked_db.is_some() { - return Ok(unwrap_db_instance(locked_db)); - }; - - // existing connection found but not initialized, hence, we initialize and return this connection. - let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; - *locked_db = Some(db); - return Ok(unwrap_db_instance(locked_db)); + match connections.entry(db_id.clone()) { + Entry::Occupied(conn) => { + let mut locked_db = conn.get().clone().lock_owned().await; + drop(connections); + // check and return found connection if already initialized. + if locked_db.is_some() { + return Ok(unwrap_db_instance(locked_db)); + }; + // existing connection found but not initialized, hence, we initialize and return this connection. 
+ let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; + *locked_db = Some(db); + Ok(unwrap_db_instance(locked_db)) + }, + Entry::Vacant(entry) => { + // No connection found so we create a new connection with immediate initialization + let db = { + let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; + let db = Arc::new(AsyncMutex::new(Some(db))); + entry.insert(db.clone()); + drop(connections); + + db + }; + + let locked_db = db.lock_owned().await; + Ok(unwrap_db_instance(locked_db)) + }, } - - // No connection found so we create a new connection with immediate initialization - let db = Db::init(DbIdentifier::new::(self.db_namespace, Some(db_id.clone()))).await?; - let db = Arc::new(AsyncMutex::new(Some(db))); - connections.insert(db_id, db.clone()); - drop(connections); - - let locked_db = db.lock_owned().await; - Ok(unwrap_db_instance(locked_db)) } } diff --git a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs index 3be805723b..82036b71e0 100644 --- a/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs +++ b/mm2src/mm2_gui_storage/src/account/storage/wasm_storage.rs @@ -69,7 +69,7 @@ impl WasmAccountStorage { async fn lock_db_mutex(&self) -> AccountStorageResult { self.account_db - .get_or_initialize_shared(None) + .get_or_initialize_shared() .await .mm_err(AccountStorageError::from) } From 06ce83bc8199285680952d3bf89a3d066a72086b Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 6 Aug 2024 13:50:52 +0100 Subject: [PATCH 182/186] save dev state --- mm2src/coins/eth/v2_activation.rs | 28 ++--- mm2src/coins/tendermint/tendermint_coin.rs | 2 +- mm2src/kdf_wc_client/Cargo.toml | 46 ------- mm2src/mm2_core/src/mm_ctx.rs | 11 +- mm2src/mm2_core/src/sql_connection_pool.rs | 22 ++-- mm2src/mm2_db/src/indexed_db/db_lock.rs | 6 +- mm2src/mm2_main/src/lp_native_dex.rs | 112 ++++++++++-------- .../src/rpc/lp_commands/lp_commands.rs | 2 +- 8 files changed, 88 insertions(+), 141 deletions(-) delete mode 100644 mm2src/kdf_wc_client/Cargo.toml diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 762cb12eba..78e9eba3f9 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -790,15 +790,9 @@ pub(crate) async fn build_address_and_priv_key_policy( #[cfg(not(target_arch = "wasm32"))] { let pubkey = { - let p_bytes = activated_key.public().as_bytes(); - if p_bytes.len() == 65 { - // Skip the first byte of the uncompressed public key before converting to the eth address. 
- let pubkey = Public::from_slice(&p_bytes[1..]); - display_eth_address(&public_to_address(&pubkey)) - } else { - let pubkey = Public::from_slice(p_bytes); - display_eth_address(&public_to_address(&pubkey)) - } + let pubkey = Public::from_slice(activated_key.public().as_bytes()); + let addr = display_eth_address(&public_to_address(&pubkey)); + addr.trim_start_matches("0x").to_string() }; run_db_migration_for_new_pubkey(ctx, pubkey) @@ -1064,23 +1058,15 @@ fn compress_public_key(uncompressed: H520) -> MmResult Option { // Use the hd_wallet_rmd160 as the db_id in HD mode only since it's unique to a device and not tied to a single address - coin.derivation_method() - .hd_wallet() - .map(|_| ctx.default_shared_db_id().to_string()) + coin.derivation_method().hd_wallet().map(|_| ctx.default_shared_db_id()) } pub(super) async fn eth_account_db_id(coin: &EthCoin) -> Option { match coin.derivation_method() { DerivationMethod::HDWallet(hd_wallet) => hd_wallet.get_enabled_address().await.map(|addr| { - let p_key = addr.pubkey(); - if p_key.as_bytes().len() == 65 { - // Skip the first byte of the uncompressed public key before converting to the eth address. - let pubkey = Public::from_slice(&p_key.as_bytes()[1..]); - display_eth_address(&public_to_address(&pubkey)) - } else { - let pubkey = Public::from_slice(p_key.as_bytes()); - display_eth_address(&public_to_address(&pubkey)) - } + let pubkey = Public::from_slice(addr.pubkey().as_bytes()); + let addr = display_eth_address(&public_to_address(&pubkey)); + addr.trim_start_matches("0x").to_string() }), _ => None, } diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index c9179cc734..15307f488c 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -2452,7 +2452,7 @@ impl MmCoin for TendermintCoin { async fn shared_db_id(&self, ctx: &MmArc) -> Option { if let TendermintActivationPolicy::PrivateKey(PrivKeyPolicy::HDWallet { .. 
}) = self.activation_policy { - return Some(ctx.default_shared_db_id().to_string()); + return Some(ctx.default_shared_db_id()); }; // Fallback to the account db_id for non-HD wallets diff --git a/mm2src/kdf_wc_client/Cargo.toml b/mm2src/kdf_wc_client/Cargo.toml deleted file mode 100644 index fe209dbda8..0000000000 --- a/mm2src/kdf_wc_client/Cargo.toml +++ /dev/null @@ -1,46 +0,0 @@ -[package] -name = "kdf_wc_client" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -cfg-if = "1.0" -common = { path = "../common" } -data-encoding = "2.6.0" -derive_more = "0.99" -enum_derives = { path = "../derives/enum_derives" } -futures = { version = "0.3", package = "futures", features = [ - "compat", - "async-await", -] } -futures-util = { version = "0.3", default-features = false, features = [ - "sink", - "std", -] } -http = "1.0.0" -mm2_core = { path = "../mm2_core" } -url = "2.3" -relay_rpc = { git = "https://github.com/borngraced/WalletConnectRust.git" } -rand = { version = "0.8.5", features = ["std", "small_rng"] } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -serde_qs = "0.10" -chrono = { version = "0.4", default-features = false, features = [ - "alloc", - "std", - "wasmbind", -] } -tokio = { version = "1.22", features = ["sync", "macros"] } -tokio-tungstenite-wasm = { git = "https://github.com/KomodoPlatform/tokio-tungstenite-wasm.git", rev = "8fc7e2f", features = [ - "rustls-tls-native-roots", -] } -pin-project = "1.1.2" - -[target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3.27" -wasm-bindgen = "0.2.86" -wasm-bindgen-test = { version = "0.3.2" } -wasm-bindgen-futures = "0.4.21" -web-sys = { version = "0.3.55", features = ["WebSocket"] } diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index ecc79f7d66..60d1b07835 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -205,13 +205,15 @@ impl MmCtx { pub fn db_id_or_default(&self, db_id: Option<&str>) -> String { db_id.unwrap_or(&self.default_db_id()).to_owned() } - pub fn default_shared_db_id(&self) -> &H160 { + pub fn shared_db_id(&self) -> &H160 { lazy_static! { static ref DEFAULT: H160 = [0; 20].into(); } self.shared_db_id.or(&|| &*DEFAULT) } + pub fn default_shared_db_id(&self) -> String { self.shared_db_id().to_string() } + #[cfg(not(target_arch = "wasm32"))] pub fn rpc_ip_port(&self) -> Result { let port = match self.conf.get("rpcport") { @@ -319,11 +321,8 @@ impl MmCtx { /// /// No checks in this method, the paths should be checked in the `fn fix_directories` instead. 
#[cfg(not(target_arch = "wasm32"))] - pub fn shared_dbdir(&self, db_id: Option<&str>) -> PathBuf { - let db_id = db_id - .map(|d| d.to_owned()) - .unwrap_or_else(|| hex::encode(self.default_shared_db_id().as_slice())); - path_to_dbdir(self.conf["dbdir"].as_str(), &db_id) + pub fn shared_dbdir(&self) -> PathBuf { + path_to_dbdir(self.conf["dbdir"].as_str(), &self.shared_db_id().to_string()) } pub fn is_watcher(&self) -> bool { self.conf["is_watcher"].as_bool().unwrap_or_default() } diff --git a/mm2src/mm2_core/src/sql_connection_pool.rs b/mm2src/mm2_core/src/sql_connection_pool.rs index 98e780ed5a..bf67b22d7a 100644 --- a/mm2src/mm2_core/src/sql_connection_pool.rs +++ b/mm2src/mm2_core/src/sql_connection_pool.rs @@ -47,7 +47,7 @@ impl SqliteConnPool { fn init_impl(ctx: &MmArc, db_id: Option<&str>, kind: DbIdConnKind) -> Result<(), String> { let db_id = Self::db_id_from_ctx(ctx, db_id, &kind); let sqlite_file_path = match kind { - DbIdConnKind::Shared => ctx.shared_dbdir(Some(&db_id)).join(SQLITE_SHARED_DB_ID), + DbIdConnKind::Shared => ctx.shared_dbdir().join(SQLITE_SHARED_DB_ID), DbIdConnKind::Single => ctx.dbdir(Some(&db_id)).join(SYNC_SQLITE_DB_ID), }; @@ -73,8 +73,8 @@ impl SqliteConnPool { let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, - default_db_id: ctx.rmd160.to_string(), - shared_db_id: ctx.default_shared_db_id().to_string(), + default_db_id: ctx.default_db_id(), + shared_db_id: ctx.default_shared_db_id(), db_root: db_root.map(|d| d.to_owned()) })); @@ -103,8 +103,8 @@ impl SqliteConnPool { let db_root = ctx.conf["dbdir"].as_str(); try_s!(ctx.sqlite_conn_pool.pin(Self { connections, - default_db_id: ctx.rmd160.to_string(), - shared_db_id: ctx.default_shared_db_id().to_string(), + default_db_id: ctx.default_db_id(), + shared_db_id: ctx.default_shared_db_id(), db_root: db_root.map(|d| d.to_owned()) })); @@ -176,8 +176,8 @@ impl SqliteConnPool { match kind { DbIdConnKind::Shared => db_id .map(|e| e.to_owned()) - .unwrap_or_else(|| ctx.default_shared_db_id().to_string()), - DbIdConnKind::Single => db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()), + .unwrap_or_else(|| ctx.default_shared_db_id()), + DbIdConnKind::Single => db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.default_db_id()), } } fn sqlite_file_path(&self, db_id: &str, kind: &DbIdConnKind) -> PathBuf { @@ -199,7 +199,7 @@ pub struct AsyncSqliteConnPool { impl AsyncSqliteConnPool { /// Initialize a database connection. pub async fn init(ctx: &MmArc, db_id: Option<&str>) -> Result<(), String> { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.default_db_id()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { { @@ -222,7 +222,7 @@ impl AsyncSqliteConnPool { try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path, - default_db_id: ctx.rmd160.to_string(), + default_db_id: ctx.default_db_id(), })); Ok(()) @@ -230,7 +230,7 @@ impl AsyncSqliteConnPool { /// Initialize a database connection. 
pub async fn init_test(ctx: &MmArc, db_id: Option<&str>) -> Result<(), String> { - let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.rmd160.to_string()); + let db_id = db_id.map(|e| e.to_owned()).unwrap_or_else(|| ctx.default_db_id()); if let Some(pool) = ctx.async_sqlite_conn_pool.as_option() { { @@ -255,7 +255,7 @@ impl AsyncSqliteConnPool { try_s!(ctx.async_sqlite_conn_pool.pin(Self { connections, sqlite_file_path: PathBuf::new(), - default_db_id: ctx.rmd160.to_string(), + default_db_id: ctx.default_db_id(), })); Ok(()) } diff --git a/mm2src/mm2_db/src/indexed_db/db_lock.rs b/mm2src/mm2_db/src/indexed_db/db_lock.rs index 3bfb4a973f..a99886bc7e 100644 --- a/mm2src/mm2_db/src/indexed_db/db_lock.rs +++ b/mm2src/mm2_db/src/indexed_db/db_lock.rs @@ -33,7 +33,7 @@ impl ConstructibleDb { pub fn new(ctx: &MmArc, db_id: Option<&str>) -> Self { let default_db_id = ctx.rmd160().to_string(); let db_id = db_id.unwrap_or(&default_db_id).to_string(); - let shared_db_id = ctx.default_shared_db_id().to_string(); + let shared_db_id = ctx.default_shared_db_id(); let conns = HashMap::from([(db_id.to_owned(), Default::default())]); ConstructibleDb { @@ -49,7 +49,7 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_shared_db(ctx: &MmArc) -> Self { let db_id = ctx.default_db_id(); - let shared_db_id = ctx.default_shared_db_id().to_string(); + let shared_db_id = ctx.default_shared_db_id(); let conns = HashMap::from([(shared_db_id.clone(), Default::default())]); ConstructibleDb { @@ -64,7 +64,7 @@ impl ConstructibleDb { /// This can be initialized later using [`ConstructibleDb::get_or_initialize`]. pub fn new_global_db(ctx: &MmArc) -> Self { let db_id = ctx.default_db_id(); - let shared_db_id = ctx.default_shared_db_id().to_string(); + let shared_db_id = ctx.default_shared_db_id(); ConstructibleDb { locks: Arc::new(RwLock::new(HashMap::default())), db_namespace: ctx.db_namespace, diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index cd88566c8c..7c9ec99af9 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -333,8 +333,11 @@ fn default_seednodes(netid: u16) -> Vec { } #[cfg(not(target_arch = "wasm32"))] -pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>, shared_db_id: Option<&str>) -> MmInitResult<()> { - fix_shared_dbdir(ctx, shared_db_id)?; +pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>, fix_shared: bool) -> MmInitResult<()> { + if fix_shared { + fix_shared_dbdir(ctx)?; + }; + let dbdir = ctx.dbdir(db_id); fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { path: dbdir.clone(), @@ -393,8 +396,8 @@ pub fn fix_directories(ctx: &MmCtx, db_id: Option<&str>, shared_db_id: Option<&s } #[cfg(not(target_arch = "wasm32"))] -fn fix_shared_dbdir(ctx: &MmCtx, db_id: Option<&str>) -> MmInitResult<()> { - let shared_db = ctx.shared_dbdir(db_id); +fn fix_shared_dbdir(ctx: &MmCtx) -> MmInitResult<()> { + let shared_db = ctx.shared_dbdir(); fs::create_dir_all(&shared_db).map_to_mm(|e| MmInitError::ErrorCreatingDbDir { path: shared_db.clone(), error: e.to_string(), @@ -468,8 +471,13 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { continue; } + // fix directories for new db_id. + if let Err(err) = fix_directories(&ctx, Some(&db_id), false) { + error!("{err:?}"); + continue; + }; // run db migration for new db_id. 
- if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), false).await { + if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id)).await { error!("{err:?}"); continue; }; @@ -486,64 +494,18 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) { } #[cfg(not(target_arch = "wasm32"))] -async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, init_shared: bool) -> MmInitResult<()> { - fix_directories(ctx, db_id, None)?; - +async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>) -> MmInitResult<()> { AsyncSqliteConnPool::init(ctx, db_id) .await .map_to_mm(MmInitError::ErrorSqliteInitializing)?; SqliteConnPool::init(ctx, db_id).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - // init shared_db once. - if init_shared { - SqliteConnPool::init_shared(ctx).map_to_mm(MmInitError::ErrorSqliteInitializing)?; - } - init_and_migrate_sql_db(ctx, db_id).await?; migrate_db(ctx, db_id)?; Ok(()) } -pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { - init_ordermatch_context(&ctx)?; - init_p2p(ctx.clone()).await?; - - if !CryptoCtx::is_init(&ctx)? { - return Ok(()); - } - - #[cfg(not(target_arch = "wasm32"))] - { - run_db_migration_impl(&ctx, None, true).await?; - ctx.spawner().spawn(init_db_migration_watcher_loop(ctx.clone())); - } - - init_message_service(&ctx).await?; - - let balance_update_ordermatch_handler = BalanceUpdateOrdermatchHandler::new(ctx.clone()); - register_balance_update_handler(ctx.clone(), Box::new(balance_update_ordermatch_handler)).await; - - ctx.initialized.pin(true).map_to_mm(MmInitError::Internal)?; - - // launch kickstart threads before RPC is available, this will prevent the API user to place - // an order and start new swap that might get started 2 times because of kick-start - kick_start(ctx.clone(), None).await?; - - init_event_streaming(&ctx).await?; - - ctx.spawner().spawn(lp_ordermatch_loop(ctx.clone())); - - ctx.spawner().spawn(broadcast_maker_orders_keep_alive_loop(ctx.clone())); - - #[cfg(target_arch = "wasm32")] - init_wasm_event_streaming(&ctx); - - ctx.spawner().spawn(clean_memory_loop(ctx.weak())); - - Ok(()) -} - pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitResult<()> { info!("Version: {} DT {}", version, datetime); #[cfg(not(target_arch = "wasm32"))] @@ -589,6 +551,52 @@ pub async fn lp_init(ctx: MmArc, version: String, datetime: String) -> MmInitRes Ok(()) } +pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> { + init_ordermatch_context(&ctx)?; + init_p2p(ctx.clone()).await?; + + if !CryptoCtx::is_init(&ctx)? { + return Ok(()); + } + + #[cfg(not(target_arch = "wasm32"))] + { + // fix directory for shared and global db. + fix_directories(&ctx, None, true)?; + // init shared_db once. 
+ SqliteConnPool::init_shared(&ctx).map_to_mm(MmInitError::ErrorSqliteInitializing)?; + run_db_migration_impl(&ctx, None).await?; + ctx.spawner().spawn(init_db_migration_watcher_loop(ctx.clone())); + } + + init_message_service(&ctx).await?; + + let balance_update_ordermatch_handler = BalanceUpdateOrdermatchHandler::new(ctx.clone()); + register_balance_update_handler(ctx.clone(), Box::new(balance_update_ordermatch_handler)).await; + + ctx.initialized.pin(true).map_to_mm(MmInitError::Internal)?; + + // launch kickstart threads before RPC is available, this will prevent the API user to place + // an order and start new swap that might get started 2 times because of kick-start + kick_start(ctx.clone(), None).await?; + + init_event_streaming(&ctx).await?; + + ctx.spawner().spawn(lp_ordermatch_loop(ctx.clone())); + + ctx.spawner().spawn(broadcast_maker_orders_keep_alive_loop(ctx.clone())); + + #[cfg(target_arch = "wasm32")] + init_wasm_event_streaming(&ctx); + + ctx.spawner().spawn(clean_memory_loop(ctx.weak())); + + Ok(()) +} + +// kick_start calls swap_kick_starts to get list of coins needed for swap kick start, +// additionally calls orders_kick_start to get list of coins needed for orders and +// then extend Mmctx::coins_needed_for_kick_start list async fn kick_start(ctx: MmArc, db_id: Option<&str>) -> MmInitResult<()> { let coins_needed_for_kick_start = { let mut coins_needed_for_kick_start = swap_kick_starts(ctx.clone(), db_id) diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs index 333526fcc7..6d632abdee 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands.rs @@ -57,7 +57,7 @@ pub struct GetSharedDbIdResponse { } pub async fn get_shared_db_id(ctx: MmArc, _req: Json) -> GetSharedDbIdResult { - let shared_db_id = ctx.default_shared_db_id().to_owned().into(); + let shared_db_id = ctx.shared_db_id().to_owned().into(); Ok(GetSharedDbIdResponse { shared_db_id }) } From 5cbfd89a7af9f8513d1bb24689e8ea5c1bdbee20 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Tue, 6 Aug 2024 15:04:12 +0100 Subject: [PATCH 183/186] fix clippy --- mm2src/mm2_main/src/lp_swap.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index b951871a64..1c52f5fef4 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -2397,7 +2397,7 @@ mod lp_swap_tests { .unwrap() .mm2_internal_key_pair(); - fix_directories(&maker_ctx, None, None).unwrap(); + fix_directories(&maker_ctx, None, true).unwrap(); block_on(init_p2p(maker_ctx.clone())).unwrap(); SqliteConnPool::init_test(&maker_ctx).unwrap(); @@ -2435,7 +2435,7 @@ mod lp_swap_tests { .unwrap() .mm2_internal_key_pair(); - fix_directories(&taker_ctx, None, None).unwrap(); + fix_directories(&taker_ctx, None, true).unwrap(); block_on(init_p2p(taker_ctx.clone())).unwrap(); SqliteConnPool::init_test(&taker_ctx).unwrap(); From cea464bdaed9bfb39e2025a2ad18f48d473a0013 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 7 Aug 2024 02:41:54 +0100 Subject: [PATCH 184/186] fix orders_kick_start --- mm2src/mm2_main/src/lp_ordermatch.rs | 233 +++++++++++++++++---------- 1 file changed, 147 insertions(+), 86 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index d0a0e86495..bfa09c06c7 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -127,6 +127,9 @@ 
const TAKER_ORDER_TIMEOUT: u64 = 30;
 const ORDER_MATCH_TIMEOUT: u64 = 30;
 const ORDERBOOK_REQUESTING_TIMEOUT: u64 = MIN_ORDER_KEEP_ALIVE_INTERVAL * 2;
 const MAX_ORDERS_NUMBER_IN_ORDERBOOK_RESPONSE: usize = 1000;
+/// Maximum number of retries when checking whether the other coin of an order is activated
+/// with the expected account ID before giving up.
+const VALIDATE_OTHER_COIN_TIMEOUT: u64 = 5;
 #[cfg(not(test))]
 const TRIE_STATE_HISTORY_TIMEOUT: u64 = 14400;
 #[cfg(test)]
@@ -1685,12 +1688,35 @@ impl TakerOrder {
 
     fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) }
 
+    /// Gets the account ID for the coin we're trading.
+    /// If we're buying, it's the base coin; if we're selling, it's the rel coin.
     pub fn account_id(&self) -> &Option<String> {
+        match self.request.action {
+            TakerAction::Buy => &self.base_coin_account_id,
+            TakerAction::Sell => &self.rel_coin_account_id,
+        }
+    }
+
+    /// Gets the account ID for the 'other' coin in the trade.
+    /// If we're buying, it's the rel coin; if we're selling, it's the base coin.
+    pub fn other_coin_account_id(&self) -> &Option<String> {
         match self.request.action {
             TakerAction::Buy => &self.rel_coin_account_id,
             TakerAction::Sell => &self.base_coin_account_id,
         }
     }
+
+    /// Validates that the 'other' coin of this order is activated with the expected account ID,
+    /// retrying a limited number of times before giving up.
+    pub async fn validate_other_coin_account_id(&self, ctx: &MmArc) -> Result<(), String> {
+        validate_other_coin_account_id_impl(
+            ctx,
+            self.taker_coin_ticker(),
+            self.other_coin_account_id(),
+            "TakerOrder",
+        )
+        .await
+    }
 }
 
 #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
@@ -2115,7 +2141,15 @@ impl MakerOrder {
 
     fn p2p_keypair(&self) -> Option<&KeyPair> { self.p2p_privkey.as_ref().map(|key| key.key_pair()) }
 
+    /// Gets the account ID for the base coin.
+    /// For maker orders, this is always the coin we're offering to trade.
     pub fn account_id(&self) -> &Option<String> { &self.base_coin_account_id }
+
+    /// Validates that the other coin (the one requested in exchange) is activated with the
+    /// expected account ID, retrying a limited number of times before giving up.
+ pub async fn validate_other_coin_account_id(&self, ctx: &MmArc) -> Result<(), String> { + validate_other_coin_account_id_impl(ctx, &self.rel, &self.rel_coin_account_id, "MakerOrder").await + } } impl From for MakerOrder { @@ -2163,8 +2197,8 @@ impl From for MakerOrder { base_orderbook_ticker: taker_order.rel_orderbook_ticker, rel_orderbook_ticker: taker_order.base_orderbook_ticker, p2p_privkey: taker_order.p2p_privkey, - base_coin_account_id: taker_order.base_coin_account_id, - rel_coin_account_id: taker_order.rel_coin_account_id, + base_coin_account_id: taker_order.rel_coin_account_id, + rel_coin_account_id: taker_order.base_coin_account_id, } }, } @@ -3413,27 +3447,7 @@ async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchCo } // transform the timed out taker order to maker - - delete_my_taker_order(ctx.clone(), order.clone(), TakerOrderCancellationReason::ToMaker) - .compat() - .await - .ok(); - let maker_order: MakerOrder = order.into(); - ordermatch_ctx - .maker_orders_ctx - .lock() - .add_order(ctx.weak(), maker_order.clone(), None); - - storage - .save_new_active_maker_order(&maker_order) - .await - .error_log_with_msg("!save_new_active_maker_order"); - if maker_order.save_in_history { - storage - .update_was_taker_in_filtering_history(uuid, maker_order.account_id().as_deref()) - .await - .error_log_with_msg("!update_was_taker_in_filtering_history"); - } + let maker_order = handle_transform_my_taker_order(&ctx, uuid, order, ordermatch_ctx, &storage).await; // notify other peers if let Ok(Some((base, rel))) = find_pair(&ctx, &maker_order.base, &maker_order.rel).await { @@ -3449,6 +3463,39 @@ async fn handle_timed_out_taker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchCo *my_taker_orders = my_actual_taker_orders; } +/// Transforms a taker order into a maker order, +/// updating relevant contexts and storage. +async fn handle_transform_my_taker_order( + ctx: &MmArc, + uuid: Uuid, + order: TakerOrder, + ordermatch_ctx: &OrdermatchContext, + storage: &MyOrdersStorage, +) -> MakerOrder { + delete_my_taker_order(ctx.clone(), order.clone(), TakerOrderCancellationReason::ToMaker) + .compat() + .await + .ok(); + let maker_order: MakerOrder = order.into(); + ordermatch_ctx + .maker_orders_ctx + .lock() + .add_order(ctx.weak(), maker_order.clone(), None); + + storage + .save_new_active_maker_order(&maker_order) + .await + .error_log_with_msg("!save_new_active_maker_order"); + if maker_order.save_in_history { + storage + .update_was_taker_in_filtering_history(uuid, maker_order.account_id().as_deref()) + .await + .error_log_with_msg("!update_was_taker_in_filtering_history"); + }; + + maker_order +} + /// # Safety /// /// The function locks the [`OrdermatchContext::my_maker_orders`] mutex. 
@@ -3457,6 +3504,26 @@ async fn check_balance_for_maker_orders(ctx: MmArc, ordermatch_ctx: &OrdermatchC for (uuid, order) in my_maker_orders { let order = order.lock().await; + // validate other coin's account id + if let Err(err) = order.validate_other_coin_account_id(&ctx).await { + warn!("{err:?} while validating other_coin account id for maker order: {uuid}"); + let removed_order_mutex = ordermatch_ctx.maker_orders_ctx.lock().remove_order(&uuid); + + // This checks that the order hasn't been removed by another process + if removed_order_mutex.is_some() { + maker_order_cancelled_p2p_notify(ctx.clone(), &order); + delete_my_maker_order( + ctx.clone(), + order.clone(), + MakerOrderCancellationReason::OtherCoinIdMisMatch, + ) + .compat() + .await + .ok(); + } + continue; + }; + if order.available_amount() >= order.min_base_vol || order.has_ongoing_matches() { continue; } @@ -4999,6 +5066,7 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result>, St pub enum MakerOrderCancellationReason { Fulfilled, InsufficientBalance, + OtherCoinIdMisMatch, Cancelled, } @@ -5447,77 +5515,36 @@ pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result { - let rel_coin_db_id = rel_coin.inner.account_db_id().await.unwrap_or(ctx.default_db_id()); - if rel_coin_db_id == order_rel_coin_db_id { - coins.insert(order.base.clone()); - coins.insert(order.rel.clone()); - let mut maker_orders_ctx = ordermatch_ctx.maker_orders_ctx.lock(); - maker_orders_ctx.add_order(ctx.weak(), order.clone(), None); - - break; - } - - info!( - "Failed to add maker order {:?} to kick_start queue. Coin account id mismatch: got=([{rel_coin_db_id}]), expected=([{order_rel_coin_db_id}])", - order.uuid - ); - Timer::sleep(5.).await; - continue; - }, - None => { - info!( - "Failed to add maker order {:?} to kick_start queue, rel coin:{} with account_id:{order_rel_coin_db_id} needs to be activated!", - order.uuid, - order.rel - ); - Timer::sleep(5.).await; - continue; - }, - } - } + coins.insert(order.base.clone()); + coins.insert(order.rel.clone()); + maker_orders_ctx.add_order(ctx.weak(), order.clone(), None); } } + let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; for order in saved_taker_orders { - let order_base_coin_account_id = order.base_coin_account_id.clone().unwrap_or(ctx.default_db_id()); - - loop { - match try_s!(lp_coinfind_any(ctx, &order.request.base).await) { - Some(base_coin) => { - let base_coin_account_id = base_coin.inner.account_db_id().await.unwrap_or(ctx.default_db_id()); - if base_coin_account_id == order_base_coin_account_id { - coins.insert(order.request.base.clone()); - coins.insert(order.request.rel.clone()); - let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await; - taker_orders.insert(order.request.uuid, order); - - break; - }; - - info!( - "Failed to add taker order {} to kick_start queue. {} Coin account id mismatch: got=([{base_coin_account_id}]), expected=([{order_base_coin_account_id}])", - order.request.uuid, - order.request.base - ); - }, - None => { - info!( - "Failed to add taker order {} to kick_start queue. 
Base coin:{} with account_id:{order_base_coin_account_id} needs to be activated!", - order.request.uuid, - order.request.base - ); + if let Err(err) = order.validate_other_coin_account_id(ctx).await { + warn!( + "{err:?} while validating other_coin account id for TakerOrder: {:?}", + order.request.uuid + ); - Timer::sleep(5.).await; - continue; - }, + if !order.matches.is_empty() || order.order_type != OrderType::GoodTillCancelled { + delete_my_taker_order(ctx.clone(), order, TakerOrderCancellationReason::TimedOut) + .compat() + .await + .ok(); + continue; } + + handle_transform_my_taker_order(ctx, order.request.uuid, order, &ordermatch_ctx, &storage).await; + continue; } + coins.insert(order.request.base.clone()); + coins.insert(order.request.rel.clone()); + taker_orders.insert(order.request.uuid, order); } Ok(coins) @@ -5982,3 +6009,37 @@ fn orderbook_address( CoinProtocol::SIA { .. } => MmError::err(OrderbookAddrErr::CoinIsNotSupported(coin.to_owned())), } } + +/// Tries to find a coin and make sure it's activated with the right account ID. +/// If it doesn't work at first, it'll keep trying for a bit. +async fn validate_other_coin_account_id_impl( + ctx: &MmArc, + other_coin_ticker: &str, + other_coin_account_id: &Option, + order_type: &str, +) -> Result<(), String> { + for attempt in 0..=VALIDATE_OTHER_COIN_TIMEOUT { + if let Some(coin) = try_s!(lp_coinfind_any(ctx, other_coin_ticker).await) { + if &coin.inner.account_db_id().await == other_coin_account_id { + info!("Correct {order_type} coin found: {}", coin.inner.ticker()); + return Ok(()); + } + } + + if attempt < VALIDATE_OTHER_COIN_TIMEOUT { + info!( + "{order_type}: validate_other_coin_account_id attempt {} failed: Coin {} not found or not activated with pubkey: {}", + attempt + 1, + other_coin_ticker, + other_coin_account_id.as_deref().unwrap_or(&ctx.default_db_id()) + ); + Timer::sleep(2.).await + } + } + + Err(format!( + "{order_type}: Coin {} not found or not activated with the expected account key after {} attempts", + other_coin_ticker, + VALIDATE_OTHER_COIN_TIMEOUT + 1 + )) +} From 07a478c1512caefd88692e66b5830c544d44a459 Mon Sep 17 00:00:00 2001 From: Samuel Onoja Date: Wed, 7 Aug 2024 02:59:54 +0100 Subject: [PATCH 185/186] fix minor review notes --- mm2src/mm2_main/src/lp_ordermatch.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index bfa09c06c7..0f171da6ef 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -1585,7 +1585,11 @@ pub struct TakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, + /// The pubkey rmd160 for the coin we're offering to trade. + /// It's optional as we'll fallback to default account pubkey. base_coin_account_id: Option, + /// The pubkey rmd160 for the coin we want in return. + /// Also optional, just like the base coin's rel_coin_account_id: Option, } @@ -1748,7 +1752,11 @@ pub struct MakerOrder { /// A custom priv key for more privacy to prevent linking orders of the same node between each other /// Commonly used with privacy coins (ARRR, ZCash, etc.) p2p_privkey: Option, + /// The pubkey rmd160 for the coin we're offering to trade. + /// It's optional as we'll fallback to default account pubkey. base_coin_account_id: Option, + /// The pubkey rmd160 for the coin we want in return. 
+    /// Also optional, just like the base coin's
     rel_coin_account_id: Option<String>,
 }
 
@@ -2791,7 +2799,6 @@ struct OrdermatchContext {
     ordermatch_db: ConstructibleDb,
 }
 
-#[allow(unused)]
 pub fn init_ordermatch_context(ctx: &MmArc) -> OrdermatchInitResult<()> {
     // Helper
     #[derive(Deserialize)]
@@ -3033,7 +3040,7 @@ fn lp_connect_start_bob(ctx: MmArc, maker_match: MakerMatch, maker_order: MakerO
             },
         };
 
-        let account_db_id = maker_coin.account_db_id().await;
+        let account_db_id = maker_order.account_id();
         if ctx.use_trading_proto_v2() {
            let secret_hash_algo = detect_secret_hash_algo(&maker_coin, &taker_coin);
             match (maker_coin, taker_coin) {
@@ -3187,7 +3194,7 @@ fn lp_connected_alice(ctx: MmArc, taker_order: TakerOrder, taker_match: TakerMat
         );
 
         let now = now_sec();
-        let account_db_id = taker_coin.account_db_id().await;
+        let account_db_id = taker_order.account_id();
         if ctx.use_trading_proto_v2() {
             let taker_secret = match generate_secret() {
                 Ok(s) => s.into(),
@@ -5048,7 +5055,6 @@ pub async fn order_status(ctx: MmArc, req: Json) -> Result<Response<Vec<u8>>, St
             storage.load_order_from_history(req.uuid, db_id.as_deref()).await,
             &storage.select_order_status(req.uuid, db_id.as_deref()).await,
         ) {
-            info!("Order with UUID=({})", req.uuid);
             let res = json!(OrderForRpcWithCancellationReason {
                 order: OrderForRpc::from(&order),
                 cancellation_reason,

From 07177cf1dc7c116e5a515bc0d56ff0007d3451a9 Mon Sep 17 00:00:00 2001
From: shamardy
Date: Sat, 14 Sep 2024 03:54:06 +0300
Subject: [PATCH 186/186] post merge formatting

---
 mm2src/coins/eth.rs                  | 137 +++++++++++++--------------
 mm2src/coins/lp_coins.rs             |   5 +-
 mm2src/mm2_main/src/lp_native_dex.rs |  20 ++--
 mm2src/mm2_main/src/lp_stats.rs      |   4 +-
 4 files changed, 77 insertions(+), 89 deletions(-)

diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs
index af3ea2d979..40071058a1 100644
--- a/mm2src/coins/eth.rs
+++ b/mm2src/coins/eth.rs
@@ -20,36 +20,17 @@
 //
 // Copyright © 2023 Pampex LTD and TillyHK LTD. All rights reserved.
// -use super::eth::Action::{Call, Create}; -use super::watcher_common::{validate_watcher_reward, REWARD_GAS_AMOUNT}; -use super::*; -use crate::coin_balance::{EnableCoinBalanceError, EnabledCoinBalanceParams, HDAccountBalance, HDAddressBalance, - HDWalletBalance, HDWalletBalanceOps}; -use crate::eth::eth_rpc::ETH_RPC_REQUEST_TIMEOUT; -use crate::eth::web3_transport::websocket_transport::{WebsocketTransport, WebsocketTransportNode}; -use crate::hd_wallet::{HDAccountOps, HDCoinAddress, HDCoinWithdrawOps, HDConfirmAddress, HDPathAccountToAddressId, - HDWalletCoinOps, HDXPubExtractor}; -use crate::lp_price::get_base_price_in_rel; -use crate::nft::nft_errors::ParseContractTypeError; -use crate::nft::nft_structs::{ContractType, ConvertChain, NftInfo, TransactionNftDetails, WithdrawErc1155, - WithdrawErc721}; -use crate::nft::WithdrawNftResult; -use crate::rpc_command::account_balance::{AccountBalanceParams, AccountBalanceRpcOps, HDAccountBalanceResponse}; -use crate::rpc_command::get_new_address::{GetNewAddressParams, GetNewAddressResponse, GetNewAddressRpcError, - GetNewAddressRpcOps}; -use crate::rpc_command::hd_account_balance_rpc_error::HDAccountBalanceRpcError; -use crate::rpc_command::init_account_balance::{InitAccountBalanceParams, InitAccountBalanceRpcOps}; -use crate::rpc_command::init_create_account::{CreateAccountRpcError, CreateAccountState, CreateNewAccountParams, - InitCreateAccountRpcOps}; -use crate::rpc_command::init_scan_for_new_addresses::{InitScanAddressesRpcOps, ScanAddressesParams, - ScanAddressesResponse}; -use crate::rpc_command::init_withdraw::{InitWithdrawCoin, WithdrawTaskHandleShared}; -use crate::rpc_command::{account_balance, get_new_address, init_account_balance, init_create_account, - init_scan_for_new_addresses}; -use crate::{coin_balance, scan_for_new_addresses_impl, BalanceResult, CoinWithDerivationMethod, DerivationMethod, - DexFee, Eip1559Ops, MakerNftSwapOpsV2, ParseCoinAssocTypes, ParseNftAssocTypes, PayForGasParams, - PrivKeyPolicy, RpcCommonOps, SendNftMakerPaymentArgs, SpendNftMakerPaymentArgs, ToBytes, - ValidateNftMakerPaymentArgs, ValidateWatcherSpendInput, WatcherSpendType}; + +// 1. Standard library imports +use std::collections::HashMap; +use std::convert::{TryFrom, TryInto}; +use std::ops::Deref; +use std::str::{from_utf8, FromStr}; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +// 2. 
External crates imports use async_trait::async_trait; use bitcrypto::{dhash160, keccak256, ripemd160, sha256}; use common::custom_futures::repeatable::{Ready, Retry, RetryOnError}; @@ -62,13 +43,17 @@ use common::{now_sec, small_rng, DEX_FEE_ADDR_RAW_PUBKEY}; use crypto::privkey::key_pair_from_secret; use crypto::{Bip44Chain, CryptoCtx, CryptoCtxError, GlobalHDAccountArc, KeyPairPolicy}; use derive_more::Display; +use eip1559_gas_fee::{BlocknativeGasApiCaller, FeePerGasSimpleEstimator, GasApiConfig, GasApiProvider, + InfuraGasApiCaller}; use enum_derives::EnumFromStringify; +use eth_hd_wallet::EthHDWallet; +use eth_rpc::ETH_RPC_REQUEST_TIMEOUT; +use eth_withdraw::{EthWithdraw, InitEthWithdraw, StandardEthWithdraw}; use ethabi::{Contract, Function, Token}; use ethcore_transaction::tx_builders::TxBuilderError; use ethcore_transaction::{Action, TransactionWrapper, TransactionWrapperBuilder as UnSignedEthTxBuilder, UnverifiedEip1559Transaction, UnverifiedEip2930Transaction, UnverifiedLegacyTransaction, UnverifiedTransactionWrapper}; -pub use ethcore_transaction::{SignedTransaction as SignedEthTx, TxType}; use ethereum_types::{Address, H160, H256, U256}; use ethkey::{public_to_address, sign, verify_address, KeyPair, Public, Signature}; use futures::compat::Future01CompatExt; @@ -80,7 +65,7 @@ use mm2_core::mm_ctx::{MmArc, MmWeak}; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; use mm2_number::bigdecimal_custom::CheckedDivision; use mm2_number::{BigDecimal, BigUint, MmNumber}; -#[cfg(test)] use mocktopus::macros::*; +use nonce::ParityNonce; use rand::seq::SliceRandom; use rlp::{DecoderError, Encodable, RlpStream}; use rpc::v1::types::Bytes as BytesJson; @@ -88,17 +73,48 @@ use secp256k1::PublicKey; use serde_json::{self as json, Value as Json}; use serialization::{CompactInteger, Serializable, Stream}; use sha3::{Digest, Keccak256}; -use std::collections::HashMap; -use std::convert::{TryFrom, TryInto}; -use std::ops::Deref; -use std::str::from_utf8; -use std::str::FromStr; -use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; -use std::sync::{Arc, Mutex}; -use std::time::Duration; +use v2_activation::{build_address_and_priv_key_policy, eth_account_db_id, eth_shared_db_id, EthActivationV2Error}; use web3::types::{Action as TraceAction, BlockId, BlockNumber, Bytes, CallRequest, FilterBuilder, Log, Trace, TraceFilterBuilder, Transaction as Web3Transaction, TransactionId, U64}; use web3::{self, Web3}; +use web3_transport::http_transport::HttpTransportNode; +use web3_transport::websocket_transport::{WebsocketTransport, WebsocketTransportNode}; +use web3_transport::Web3Transport; + +// 3. 
Internal crate imports +use crate::coin_balance::{EnableCoinBalanceError, EnabledCoinBalanceParams, HDAccountBalance, HDAddressBalance, + HDWalletBalance, HDWalletBalanceOps}; +use crate::hd_wallet::{HDAccountOps, HDCoinAddress, HDCoinWithdrawOps, HDConfirmAddress, HDPathAccountToAddressId, + HDWalletCoinOps, HDXPubExtractor}; +use crate::lp_price::get_base_price_in_rel; +use crate::nft::nft_errors::ParseContractTypeError; +use crate::nft::nft_structs::{ContractType, ConvertChain, NftInfo, TransactionNftDetails, WithdrawErc1155, + WithdrawErc721}; +use crate::nft::WithdrawNftResult; +use crate::rpc_command::account_balance::{AccountBalanceParams, AccountBalanceRpcOps, HDAccountBalanceResponse}; +use crate::rpc_command::get_new_address::{GetNewAddressParams, GetNewAddressResponse, GetNewAddressRpcError, + GetNewAddressRpcOps}; +use crate::rpc_command::hd_account_balance_rpc_error::HDAccountBalanceRpcError; +use crate::rpc_command::init_account_balance::{InitAccountBalanceParams, InitAccountBalanceRpcOps}; +use crate::rpc_command::init_create_account::{CreateAccountRpcError, CreateAccountState, CreateNewAccountParams, + InitCreateAccountRpcOps}; +use crate::rpc_command::init_scan_for_new_addresses::{InitScanAddressesRpcOps, ScanAddressesParams, + ScanAddressesResponse}; +use crate::rpc_command::init_withdraw::{InitWithdrawCoin, WithdrawTaskHandleShared}; +use crate::rpc_command::{account_balance, get_new_address, init_account_balance, init_create_account, + init_scan_for_new_addresses}; +use crate::{coin_balance, scan_for_new_addresses_impl, BalanceResult, CoinWithDerivationMethod, DerivationMethod, + DexFee, Eip1559Ops, MakerNftSwapOpsV2, ParseCoinAssocTypes, ParseNftAssocTypes, PayForGasParams, + PrivKeyPolicy, RpcCommonOps, SendNftMakerPaymentArgs, SpendNftMakerPaymentArgs, ToBytes, + ValidateNftMakerPaymentArgs, ValidateWatcherSpendInput, WatcherSpendType}; + +// 4. Local module imports +use super::eth::Action::{Call, Create}; +use super::watcher_common::{validate_watcher_reward, REWARD_GAS_AMOUNT}; +use super::*; + +// 5. Conditionally compiled imports +#[cfg(test)] use mocktopus::macros::*; cfg_wasm32! { use common::{now_ms, wait_until_ms}; @@ -108,34 +124,22 @@ cfg_wasm32! 
{ use web3::types::TransactionRequest; } -use super::{coin_conf, lp_coinfind_or_err, AsyncMutex, BalanceError, BalanceFut, CheckIfMyPaymentSentArgs, - CoinBalance, CoinFutSpawner, CoinProtocol, CoinTransportMetrics, CoinsContext, ConfirmPaymentInput, - EthValidateFeeArgs, FeeApproxStage, FoundSwapTxSpend, HistorySyncState, IguanaPrivKey, MakerSwapTakerCoin, - MarketCoinOps, MmCoin, MmCoinEnum, MyAddressError, MyWalletAddress, NegotiateSwapContractAddrErr, - NumConversError, NumConversResult, PaymentInstructionArgs, PaymentInstructions, PaymentInstructionsErr, - PrivKeyBuildPolicy, PrivKeyPolicyNotAllowed, RawTransactionError, RawTransactionFut, - RawTransactionRequest, RawTransactionRes, RawTransactionResult, RefundError, RefundPaymentArgs, - RefundResult, RewardTarget, RpcClientType, RpcTransportEventHandler, RpcTransportEventHandlerShared, - SearchForSwapTxSpendInput, SendMakerPaymentSpendPreimageInput, SendPaymentArgs, SignEthTransactionParams, - SignRawTransactionEnum, SignRawTransactionRequest, SignatureError, SignatureResult, SpendPaymentArgs, - SwapOps, SwapTxFeePolicy, TakerSwapMakerCoin, TradeFee, TradePreimageError, TradePreimageFut, - TradePreimageResult, TradePreimageValue, Transaction, TransactionDetails, TransactionEnum, TransactionErr, - TransactionFut, TransactionType, TxMarshalingErr, UnexpectedDerivationMethod, ValidateAddressResult, - ValidateFeeArgs, ValidateInstructionsErr, ValidateOtherPubKeyErr, ValidatePaymentError, - ValidatePaymentFut, ValidatePaymentInput, VerificationError, VerificationResult, WaitForHTLCTxSpendArgs, - WatcherOps, WatcherReward, WatcherRewardError, WatcherSearchForSwapTxSpendInput, - WatcherValidatePaymentInput, WatcherValidateTakerFeeInput, WithdrawError, WithdrawFee, WithdrawFut, - WithdrawRequest, WithdrawResult, EARLY_CONFIRMATION_ERR_LOG, INVALID_CONTRACT_ADDRESS_ERR_LOG, - INVALID_PAYMENT_STATE_ERR_LOG, INVALID_RECEIVER_ERR_LOG, INVALID_SENDER_ERR_LOG, INVALID_SWAP_ID_ERR_LOG}; -pub use rlp; cfg_native! { use std::path::PathBuf; } +// 6. Re-exports +pub use ethcore_transaction::{SignedTransaction as SignedEthTx, TxType}; +pub use rlp; + +// 7. 
Local modules declarations mod eip1559_gas_fee; +pub(crate) use eip1559_gas_fee::FeePerGasEstimated; + mod eth_balance_events; pub mod eth_hd_wallet; mod eth_rpc; +pub(crate) mod eth_swap_v2; #[cfg(test)] mod eth_tests; #[cfg(target_arch = "wasm32")] mod eth_wasm_tests; mod eth_withdraw; @@ -145,19 +149,6 @@ mod nonce; #[path = "eth/v2_activation.rs"] pub mod v2_activation; mod web3_transport; -use crate::eth::eth_hd_wallet::EthHDWallet; -use crate::eth::eth_withdraw::{EthWithdraw, InitEthWithdraw, StandardEthWithdraw}; -use crate::eth::nonce::ParityNonce; -use crate::eth::v2_activation::{build_address_and_priv_key_policy, eth_account_db_id, eth_shared_db_id, - EthActivationV2Error}; -use crate::eth::web3_transport::http_transport::HttpTransportNode; -use crate::eth::web3_transport::Web3Transport; -pub(crate) use eip1559_gas_fee::FeePerGasEstimated; -use eip1559_gas_fee::{BlocknativeGasApiCaller, FeePerGasSimpleEstimator, GasApiConfig, GasApiProvider, - InfuraGasApiCaller}; - -pub(crate) mod eth_swap_v2; - /// https://github.com/artemii235/etomic-swap/blob/master/contracts/EtomicSwap.sol /// Dev chain (195.201.137.5:8565) contract address: 0x83965C539899cC0F918552e5A26915de40ee8852 /// Ropsten: https://ropsten.etherscan.io/address/0x7bc1bbdd6a0a722fc9bffc49c921b685ecb84b94 diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 1c1de329e7..1e1681602a 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -275,12 +275,11 @@ pub use solana::spl::SplToken; pub use solana::{SolTransaction, SolanaActivationParams, SolanaCoin, SolanaFeeDetails}; pub use test_coin::TestCoin; -use crate::eth::eth_swap_v2::{PaymentStatusErr, PrepareTxDataError, ValidatePaymentV2Err}; use coin_balance::{AddressBalanceStatus, BalanceObjectOps, HDAddressBalance, HDWalletBalanceObject, HDWalletBalanceOps}; use coin_errors::{MyAddressError, ValidatePaymentError, ValidatePaymentFut, ValidatePaymentResult}; -use eth::GetValidEthWithdrawAddError; +use eth::eth_swap_v2::{PaymentStatusErr, PrepareTxDataError, ValidatePaymentV2Err}; use eth::{eth_coin_from_conf_and_request, get_eth_address, EthCoin, EthGasDetailsErr, EthTxFeeDetails, - GetEthAddressError, SignedEthTx}; + GetEthAddressError, GetValidEthWithdrawAddError, SignedEthTx}; use ethereum_types::U256; use hd_wallet::{AccountUpdatingError, AddressDerivingError, HDAccountOps, HDAddressId, HDAddressOps, HDCoinAddress, HDCoinHDAccount, HDExtractPubkeyError, HDPathAccountToAddressId, HDWalletAddress, HDWalletCoinOps, diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index a454454474..4f7a8c23ea 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -18,6 +18,14 @@ // marketmaker // +use crate::heartbeat_event::HeartbeatEvent; +use crate::lp_message_service::{init_message_service, InitMessageServiceError}; +use crate::lp_network::{lp_network_ports, p2p_event_process_loop, NetIdError}; +use crate::lp_ordermatch::{broadcast_maker_orders_keep_alive_loop, clean_memory_loop, init_ordermatch_context, + lp_ordermatch_loop, orders_kick_start, BalanceUpdateOrdermatchHandler, OrdermatchInitError}; +use crate::lp_swap::{running_swaps_num, swap_kick_starts}; +use crate::lp_wallet::{initialize_wallet_passphrase, WalletInitError}; +use crate::rpc::spawn_rpc; use bitcrypto::sha256; use coins::register_balance_update_handler; use common::executor::{SpawnFuture, Timer}; @@ -45,18 +53,8 @@ use std::str; use std::time::Duration; use std::{fs, usize}; -#[cfg(not(target_arch = 
"wasm32"))] -use crate::database::init_and_migrate_sql_db; -use crate::heartbeat_event::HeartbeatEvent; -use crate::lp_message_service::{init_message_service, InitMessageServiceError}; -use crate::lp_network::{lp_network_ports, p2p_event_process_loop, NetIdError}; -use crate::lp_ordermatch::{broadcast_maker_orders_keep_alive_loop, clean_memory_loop, init_ordermatch_context, - lp_ordermatch_loop, orders_kick_start, BalanceUpdateOrdermatchHandler, OrdermatchInitError}; -use crate::lp_swap::{running_swaps_num, swap_kick_starts}; -use crate::lp_wallet::{initialize_wallet_passphrase, WalletInitError}; -use crate::rpc::spawn_rpc; - cfg_native! { + use crate::database::init_and_migrate_sql_db; use db_common::sqlite::rusqlite::Error as SqlError; use mm2_core::sql_connection_pool::{AsyncSqliteConnPool, SqliteConnPool}; use mm2_io::fs::{ensure_dir_is_writable, ensure_file_is_writable}; diff --git a/mm2src/mm2_main/src/lp_stats.rs b/mm2src/mm2_main/src/lp_stats.rs index 2e10ce6136..1f311facb1 100644 --- a/mm2src/mm2_main/src/lp_stats.rs +++ b/mm2src/mm2_main/src/lp_stats.rs @@ -1,7 +1,7 @@ -use crate::lp_network::{add_reserved_peer_addresses, lp_network_ports, request_peers, NetIdError, P2PRequest, - ParseAddressError, PeerDecodedResponse}; /// The module is responsible for mm2 network stats collection /// +use crate::lp_network::{add_reserved_peer_addresses, lp_network_ports, request_peers, NetIdError, P2PRequest, + ParseAddressError, PeerDecodedResponse}; use coins::find_unique_account_ids_active; #[cfg(not(target_arch = "wasm32"))] use common::async_blocking; use common::executor::{SpawnFuture, Timer};