Skip to content

Commit

Permalink
fix review notes and remove leftover code
Browse files Browse the repository at this point in the history
  • Loading branch information
borngraced committed Jul 24, 2024
1 parent b690e93 commit 1675a53
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 23 deletions.
13 changes: 8 additions & 5 deletions mm2src/coins/eth/v2_activation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -742,7 +742,9 @@ pub(crate) async fn build_address_and_priv_key_policy(

#[cfg(not(target_arch = "wasm32"))]
{
let pubkey = dhash160(activated_key.public().as_bytes()).to_string();
// Skip the first byte of the uncompressed public key before converting to the eth address.
let pubkey = Public::from_slice(&activated_key.public().as_bytes()[1..]);
let pubkey = public_to_address(&pubkey).to_string();
run_db_migration_for_new_pubkey(ctx, pubkey)
.await
.map_to_mm(EthActivationV2Error::InternalError)?;
Expand Down Expand Up @@ -1013,10 +1015,11 @@ pub(super) async fn eth_shared_db_id(coin: &EthCoin, ctx: &MmArc) -> Option<Stri

pub(super) async fn eth_account_db_id(coin: &EthCoin) -> Option<String> {
match coin.derivation_method() {
DerivationMethod::HDWallet(hd_wallet) => hd_wallet
.get_enabled_address()
.await
.map(|addr| dhash160(addr.pubkey().as_bytes()).to_string()),
DerivationMethod::HDWallet(hd_wallet) => hd_wallet.get_enabled_address().await.map(|addr| {
// Skip the first byte of the uncompressed public key before converting to the eth address.
let pubkey = Public::from_slice(&addr.pubkey().as_bytes()[1..]);
public_to_address(&pubkey).to_string()
}),
_ => None,
}
}
18 changes: 3 additions & 15 deletions mm2src/mm2_db/src/indexed_db/indexed_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -111,28 +111,20 @@ impl DbIdentifier {
}

pub fn display_db_id(&self) -> String { self.db_id.clone().unwrap_or_else(|| "KOMODEFI".to_string()) }

pub fn db_id(&self) -> String {
self.db_id
.clone()
.unwrap_or_else(|| hex::encode(H160::default().as_slice()))
}
}

pub struct IndexedDbBuilder {
pub db_name: String,
pub db_version: u32,
pub tables: HashMap<String, OnUpgradeNeededCb>,
pub db_id: String,
}

impl IndexedDbBuilder {
pub fn new(db_id: DbIdentifier) -> IndexedDbBuilder {
pub fn new(db_ident: DbIdentifier) -> IndexedDbBuilder {
IndexedDbBuilder {
db_name: db_id.to_string(),
db_name: db_ident.to_string(),
db_version: 1,
tables: HashMap::new(),
db_id: db_id.db_id(),
}
}

Expand All @@ -148,13 +140,12 @@ impl IndexedDbBuilder {
}

pub async fn build(self) -> InitDbResult<IndexedDb> {
let db_id = self.db_id.clone();
let (init_tx, init_rx) = oneshot::channel();
let (event_tx, event_rx) = mpsc::unbounded();

self.init_and_spawn(init_tx, event_rx);
init_rx.await.expect("The init channel must not be closed")?;
Ok(IndexedDb { event_tx, db_id })
Ok(IndexedDb { event_tx })
}

fn init_and_spawn(
Expand Down Expand Up @@ -190,7 +181,6 @@ impl IndexedDbBuilder {

pub struct IndexedDb {
event_tx: DbEventTx,
db_id: String,
}

async fn send_event_recv_response<Event, Item, Error>(
Expand Down Expand Up @@ -248,8 +238,6 @@ impl IndexedDb {
// ignore if the receiver is closed
result_tx.send(Ok(transaction_event_tx)).ok();
}

pub fn get_db_id(&self) -> String { self.db_id.to_string() }
}

pub struct DbTransaction<'transaction> {
Expand Down
15 changes: 12 additions & 3 deletions mm2src/mm2_main/src/lp_native_dex.rs
Original file line number Diff line number Diff line change
Expand Up @@ -460,21 +460,21 @@ async fn init_db_migration_watcher_loop(ctx: MmArc) {
let mut migrations = HashSet::new();
let mut receiver = ctx
.init_db_migration_watcher()
.expect("db_m igration_watcher initialization failed");
.expect("db_migration_watcher initialization failed");

while let Some(db_id) = receiver.next().await {
if migrations.contains(&db_id) {
debug!("{} migrated, skipping migration..", db_id);
continue;
}

// run db migration for db_id if new activated pubkey is unique.
// run db migration for new db_id.
if let Err(err) = run_db_migration_impl(&ctx, Some(&db_id), None).await {
error!("{err:?}");
continue;
};

// insert new db_id to migration list
// insert new db_id to migrated list
migrations.insert(db_id.to_owned());

// Fetch and extend ctx.coins_needed_for_kick_start from the newly initialized db.
Expand All @@ -500,6 +500,15 @@ async fn run_db_migration_impl(ctx: &MmArc, db_id: Option<&str>, shared_db_id: O
}

pub async fn lp_init_continue(ctx: MmArc) -> MmInitResult<()> {
#[cfg(not(target_arch = "wasm32"))]
{
let dbdir = ctx.dbdir(None);
fs::create_dir_all(&dbdir).map_to_mm(|e| MmInitError::ErrorCreatingDbDir {
path: dbdir.clone(),
error: e.to_string(),
})?;
}

init_ordermatch_context(&ctx)?;
init_p2p(ctx.clone()).await?;

Expand Down
8 changes: 8 additions & 0 deletions mm2src/mm2_main/src/lp_ordermatch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5428,6 +5428,10 @@ pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result<HashS
{
let mut maker_orders_ctx = ordermatch_ctx.maker_orders_ctx.lock();
for order in saved_maker_orders {
// we need to only kickstart orders if the coin is activated with the same db_id as the order's db_id.
if order.db_id().as_deref() != db_id {
continue;
}
coins.insert(order.base.clone());
coins.insert(order.rel.clone());
maker_orders_ctx.add_order(ctx.weak(), order.clone(), None);
Expand All @@ -5436,6 +5440,10 @@ pub async fn orders_kick_start(ctx: &MmArc, db_id: Option<&str>) -> Result<HashS

let mut taker_orders = ordermatch_ctx.my_taker_orders.lock().await;
for order in saved_taker_orders {
// we need to only kickstart orders if the coin is activated with the same db_id as the order's db_id.
if order.db_id().as_deref() != db_id {
continue;
}
coins.insert(order.request.base.clone());
coins.insert(order.request.rel.clone());
taker_orders.insert(order.request.uuid, order);
Expand Down

0 comments on commit 1675a53

Please sign in to comment.