diff --git a/Cargo.lock b/Cargo.lock
index ff1bc5dec2e6..c2bedbeb4a51 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1764,6 +1764,7 @@ dependencies = [
  "hex",
  "log",
  "monero",
+ "rand 0.8.5",
  "serde",
  "signal-hook",
  "signal-hook-async-std",
diff --git a/bin/darkfi-mmproxy/Cargo.toml b/bin/darkfi-mmproxy/Cargo.toml
index 95e69b3d452a..e00740f297b7 100644
--- a/bin/darkfi-mmproxy/Cargo.toml
+++ b/bin/darkfi-mmproxy/Cargo.toml
@@ -14,6 +14,7 @@ darkfi-serial = {path = "../../src/serial", features = ["async"]}
 
 # Misc
 log = "0.4.20"
+rand = "0.8.5"
 
 # Monero
 epee-encoding = {version = "0.5.0", features = ["derive"]}
diff --git a/bin/darkfi-mmproxy/src/main.rs b/bin/darkfi-mmproxy/src/main.rs
index 625846fba910..7337ff474e97 100644
--- a/bin/darkfi-mmproxy/src/main.rs
+++ b/bin/darkfi-mmproxy/src/main.rs
@@ -26,6 +26,7 @@ use darkfi::{
     rpc::{
         jsonrpc::{ErrorCode, JsonError, JsonRequest, JsonResult, JsonSubscriber},
         server::{listen_and_serve, RequestHandler},
+        util::JsonValue,
     },
     system::{StoppableTask, StoppableTaskPtr},
     Error, Result,
@@ -94,6 +95,8 @@ struct Monerod {
 struct Worker {
     /// JSON-RPC notification subscriber, used to send job notifications
     job_sub: JsonSubscriber,
+    /// Current job ID for the worker
+    job_id: Uuid,
     /// Keepalive sender channel, pinged from stratum keepalived
     ka_send: channel::Sender<()>,
     /// Background keepalive task reference
@@ -106,7 +109,26 @@ impl Worker {
         ka_send: channel::Sender<()>,
         ka_task: StoppableTaskPtr,
     ) -> Self {
-        Self { job_sub, ka_send, ka_task }
+        Self { job_sub, job_id: Uuid::new_v4(), ka_send, ka_task }
+    }
+
+    async fn send_job(&mut self, blob: String, target: String) -> Result<()> {
+        // Update job id
+        self.job_id = Uuid::new_v4();
+
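+        // Job notification payload for the worker: the hex blockhashing
+        // blob, the fresh job ID, and the mining difficulty target.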
monerod RPC request")) } }; @@ -56,7 +56,7 @@ impl MiningProxy { let response_bytes = match response.body_bytes().await { Ok(v) => v, Err(e) => { - error!(target: "rpc::monero::get_block_count", "Error reading monerod RPC response: {}", e); + error!(target: "rpc::monero::get_block_count", "[RPC] Error reading monerod RPC response: {}", e); return Err(Error::ParseFailed("Failed reading monerod RPC reponse")) } }; @@ -64,7 +64,7 @@ impl MiningProxy { let response_string = match String::from_utf8(response_bytes) { Ok(v) => v, Err(e) => { - error!(target: "rpc::monero::get_block_count", "Error parsing monerod RPC response: {}", e); + error!(target: "rpc::monero::get_block_count", "[RPC] Error parsing monerod RPC response: {}", e); return Err(Error::ParseFailed("Failed parsing monerod RPC reponse")) } }; @@ -72,7 +72,7 @@ impl MiningProxy { let response_json: JsonValue = match response_string.parse() { Ok(v) => v, Err(e) => { - error!(target: "rpc::monero::get_block_count", "Error parsing monerod RPC response: {}", e); + error!(target: "rpc::monero::get_block_count", "[RPC] Error parsing monerod RPC response: {}", e); return Err(Error::ParseFailed("Failed parsing monerod RPC reponse")) } }; @@ -90,7 +90,7 @@ impl MiningProxy { let rep = match self.oneshot_request(req).await { Ok(v) => v, Err(e) => { - error!(target: "rpc::monero::get_block_count", "{}", e); + error!(target: "rpc::monero::get_block_count", "[RPC] {}", e); return JsonError::new(InternalError, Some(e.to_string()), id).into() } }; @@ -120,7 +120,7 @@ impl MiningProxy { let rep = match self.oneshot_request(req).await { Ok(v) => v, Err(e) => { - error!(target: "rpc::monero::get_block_count", "{}", e); + error!(target: "rpc::monero::get_block_count", "[RPC] {}", e); return JsonError::new(InternalError, Some(e.to_string()), id).into() } }; @@ -180,7 +180,7 @@ impl MiningProxy { let mut rep = match self.oneshot_request(req).await { Ok(v) => v, Err(e) => { - error!(target: "rpc::monero::get_block_template", "{}", e); + error!(target: "rpc::monero::get_block_template", "[RPC] {}", e); return JsonError::new(InternalError, Some(e.to_string()), id).into() } }; @@ -212,15 +212,69 @@ impl MiningProxy { JsonResponse::new(rep, id).into() } - /* + /// Submit a mined block to the network + /// pub async fn monero_submit_block(&self, id: u16, params: JsonValue) -> JsonResult { - todo!() + debug!(target: "rpc::monero", "submit_block()"); + + let Some(params_vec) = params.get::>() else { + return JsonError::new(InvalidParams, None, id).into() + }; + + if params_vec.is_empty() { + return JsonError::new(InvalidParams, None, id).into() + } + + // Deserialize the block blob(s) to make sure it's a valid block + for element in params_vec.iter() { + let Some(block_hex) = element.get::() else { + return JsonError::new(InvalidParams, None, id).into() + }; + + let Ok(block_bytes) = hex::decode(block_hex) else { + return JsonError::new(InvalidParams, None, id).into() + }; + + let Ok(block) = monero::consensus::deserialize::(&block_bytes) else { + return JsonError::new(InvalidParams, None, id).into() + }; + + info!("[RPC] Got submitted Monero block id {}", block.id()); + } + + // Now when all the blocks submitted are valid, we'll just forward them to + // monerod to submit onto the network. 
+ let req = JsonRequest::new("submit_block", params); + let rep = match self.oneshot_request(req).await { + Ok(v) => v, + Err(e) => { + error!(target: "rpc::monero::submit_block", "[RPC] {}", e); + return JsonError::new(InternalError, Some(e.to_string()), id).into() + } + }; + + JsonResponse::new(rep, id).into() } + /// Generate a block and specify the address to receive the coinbase reward. + /// pub async fn monero_generateblocks(&self, id: u16, params: JsonValue) -> JsonResult { - todo!() + debug!(target: "rpc::monero", "generateblocks()"); + + // This request can just passthrough + let req = JsonRequest::new("generateblocks", params); + let rep = match self.oneshot_request(req).await { + Ok(v) => v, + Err(e) => { + error!(target: "rpc::monero::generateblocks", "[RPC] {}", e); + return JsonError::new(InternalError, Some(e.to_string()), id).into() + } + }; + + JsonResponse::new(rep, id).into() } + /* pub async fn monero_get_last_block_header(&self, id: u16, params: JsonValue) -> JsonResult { todo!() } diff --git a/bin/darkfi-mmproxy/src/stratum.rs b/bin/darkfi-mmproxy/src/stratum.rs index 5a7f4d967f3f..f78029e59504 100644 --- a/bin/darkfi-mmproxy/src/stratum.rs +++ b/bin/darkfi-mmproxy/src/stratum.rs @@ -27,6 +27,7 @@ use darkfi::{ Error, Result, }; use log::{debug, error, info, warn}; +use rand::{rngs::OsRng, Rng}; use smol::{channel, lock::RwLock}; use uuid::Uuid; @@ -159,7 +160,7 @@ impl MiningProxy { let ka_task = StoppableTask::new(); // Create worker - let worker = Worker::new(job_sub, ka_send, ka_task.clone()); + let worker = Worker::new(job_sub.clone(), ka_send, ka_task.clone()); // Insert into connections map self.workers.write().await.insert(uuid, worker); @@ -174,15 +175,34 @@ impl MiningProxy { info!("Added worker {} ({})", login, uuid); - // TODO: Send current job - JsonResponse::new( - JsonValue::Object(HashMap::from([( - "status".to_string(), - JsonValue::String("KEEPALIVED".to_string()), - )])), - id, - ) - .into() + // Get block template for mining + let gbt_params: JsonValue = HashMap::from([ + ("wallet_address".to_string(), self.monerod.wallet_address.clone().into()), + ("reserve_size".to_string(), (0_f64).into()), + ]) + .into(); + + let block_template = match self.monero_get_block_template(OsRng.gen(), gbt_params).await { + JsonResult::Response(resp) => { + resp.result.get::>().unwrap().clone() + } + _ => { + error!("[STRATUM] Failed getting block template from monerod"); + return JsonError::new(ErrorCode::InternalError, None, id).into() + } + }; + + // Send the job to the worker + let mut workers = self.workers.write().await; + let worker = workers.get_mut(&uuid).unwrap(); + let blob = block_template["blockhashing_blob"].get::().unwrap(); + let target = block_template["wide_difficulty"].get::().unwrap(); + if let Err(e) = worker.send_job(blob.clone(), target.clone()).await { + error!("[STRATUM] Failed sending job to {}: {}", uuid, e); + return JsonError::new(ErrorCode::InternalError, None, id).into() + } + + job_sub.into() } pub async fn stratum_submit(&self, id: u16, params: JsonValue) -> JsonResult { diff --git a/bin/dnet/model.py b/bin/dnet/model.py index 66bfc01de607..37276a2e29dc 100644 --- a/bin/dnet/model.py +++ b/bin/dnet/model.py @@ -53,11 +53,11 @@ def add_node(self, node): for i, id in enumerate(info["outbound_slots"]): if id == 0: - outbounds = self.nodes[name]['outbound'][f"{i}"] = "none" + outbounds = self.nodes[name]['outbound'][f"{i}"] = ["none", 0] continue assert id in channel_lookup url = channel_lookup[id]["url"] - outbounds = 
+        let block_template = match self.monero_get_block_template(OsRng.gen(), gbt_params).await {
+            JsonResult::Response(resp) => {
+                resp.result.get::<HashMap<String, JsonValue>>().unwrap().clone()
+            }
+            _ => {
+                error!("[STRATUM] Failed getting block template from monerod");
+                return JsonError::new(ErrorCode::InternalError, None, id).into()
+            }
+        };
+
+        // Send the job to the worker
+        let mut workers = self.workers.write().await;
+        let worker = workers.get_mut(&uuid).unwrap();
+        let blob = block_template["blockhashing_blob"].get::<String>().unwrap();
+        let target = block_template["wide_difficulty"].get::<String>().unwrap();
+        if let Err(e) = worker.send_job(blob.clone(), target.clone()).await {
+            error!("[STRATUM] Failed sending job to {}: {}", uuid, e);
+            return JsonError::new(ErrorCode::InternalError, None, id).into()
+        }
+
+        job_sub.into()
     }
 
     pub async fn stratum_submit(&self, id: u16, params: JsonValue) -> JsonResult {
diff --git a/bin/dnet/model.py b/bin/dnet/model.py
index 66bfc01de607..37276a2e29dc 100644
--- a/bin/dnet/model.py
+++ b/bin/dnet/model.py
@@ -53,11 +53,11 @@ def add_node(self, node):
 
         for i, id in enumerate(info["outbound_slots"]):
             if id == 0:
-                outbounds = self.nodes[name]['outbound'][f"{i}"] = "none"
+                outbounds = self.nodes[name]['outbound'][f"{i}"] = ["none", 0]
                 continue
             assert id in channel_lookup
             url = channel_lookup[id]["url"]
-            outbounds = self.nodes[name]['outbound'][f"{i}"] = url
+            outbounds = self.nodes[name]['outbound'][f"{i}"] = [url, id]
 
         for channel in channels:
             if channel["session"] != "seed":
@@ -122,26 +122,28 @@ def add_event(self, event):
                 logging.debug(f"{current_time} inbound (disconnect): {addr}")
             case "outbound_slot_sleeping":
                 slot = info["slot"]
+                event = self.nodes[name]['event']
+                event[(f"{name}", f"{slot}")] = ["sleeping", 0]
                 logging.debug(f"{current_time} slot {slot}: sleeping")
-                self.nodes[name]['event'][(f"{name}", f"{slot}")] = "sleeping"
             case "outbound_slot_connecting":
                 slot = info["slot"]
                 addr = info["addr"]
                 event = self.nodes[name]['event']
-                event[(f"{name}", f"{slot}")] = f"connecting: addr={addr}"
+                event[(f"{name}", f"{slot}")] = [f"connecting: addr={addr}", 0]
                 logging.debug(f"{current_time} slot {slot}: connecting addr={addr}")
             case "outbound_slot_connected":
                 slot = info["slot"]
                 addr = info["addr"]
-                channel_id = info["channel_id"]
-                event = self.nodes[name]['event']
-                event[(f"{name}", f"{slot}")] = f"connected: addr={addr}"
+                id = info["channel_id"]
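+                # Store [addr, channel_id] on the outbound slot so the view
+                # can correlate this slot with its channel.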
+                self.nodes[name]['outbound'][f"{slot}"] = [addr, id]
                 logging.debug(f"{current_time} slot {slot}: connected addr={addr}")
             case "outbound_slot_disconnected":
                 slot = info["slot"]
                 err = info["err"]
                 event = self.nodes[name]['event']
-                event[(f"{name}", f"{slot}")] = f"disconnected: {err}"
+                event[(f"{name}", f"{slot}")] = [f"disconnected: {err}", 0]
                 logging.debug(f"{current_time} slot {slot}: disconnected err='{err}'")
             case "outbound_peer_discovery":
                 attempt = info["attempt"]
diff --git a/bin/dnet/rpc.py b/bin/dnet/rpc.py
index 7c2cfc98bde8..7c6507c0744e 100644
--- a/bin/dnet/rpc.py
+++ b/bin/dnet/rpc.py
@@ -25,7 +25,7 @@ class JsonRpc:
 
     async def start(self, host, port):
-        logging.info(f"trying to connect to {host}:{port}")
+        #logging.info(f"trying to connect to {host}:{port}")
         reader, writer = await asyncio.open_connection(host, port)
         self.reader = reader
         self.writer = writer
 
diff --git a/bin/dnet/view.py b/bin/dnet/view.py
index 808a71a474e5..9b4b1ed79242 100644
--- a/bin/dnet/view.py
+++ b/bin/dnet/view.py
@@ -40,14 +40,15 @@ def update(self, txt):
         self._w = urwid.AttrWrap(self._w, None)
         self._w.focus_attr = 'line'
 
-    def is_empty(self):
-        self.is_empty == True
-
 
 class Node(DnetWidget):
-    def set_txt(self):
-        txt = urwid.Text(f"{self.node_name}")
-        super().update(txt)
+    def set_txt(self, is_empty: bool):
+        if is_empty:
+            txt = urwid.Text(f"{self.node_name} (offline)")
+            super().update(txt)
+        else:
+            txt = urwid.Text(f"{self.node_name}")
+            super().update(txt)
 
 
 class Session(DnetWidget):
@@ -59,10 +60,12 @@ def set_txt(self):
 
 class Slot(DnetWidget):
     def set_txt(self, i, addr):
         self.i = i
-        self.addr = addr
         if len(self.i) == 1:
+            self.addr = addr[0]
+            self.id = addr[1]
             txt = urwid.Text(f" {self.i}: {self.addr}")
         else:
+            self.addr = addr
             txt = urwid.Text(f" {self.addr}")
         super().update(txt)
@@ -91,7 +94,7 @@ def __init__(self, model):
     #-----------------------------------------------------------------
     def draw_info(self, node_name, info):
         node = Node(node_name, "node")
-        node.set_txt()
+        node.set_txt(False)
         self.listwalker.contents.append(node)
 
         if 'outbound' in info and info['outbound']:
@@ -133,9 +136,8 @@ def draw_info(self, node_name, info):
             self.listwalker.contents.append(slot)
 
     def draw_empty(self, node_name, info):
-        name = node_name + " (offline)"
-        node = Node(name, "node")
-        node.set_txt()
+        node = Node(node_name, "node")
+        node.set_txt(True)
         self.listwalker.contents.append(node)
 
     #-----------------------------------------------------------------
@@ -192,55 +194,91 @@ async def update_view(self, evloop: asyncio.AbstractEventLoop,
                           loop: urwid.MainLoop):
         live_nodes = []
         dead_nodes = []
-        live_inbound = []
-        dead_inbound = []
+        known_nodes = []
+        known_inbound = []
+        known_outbound = []
+        refresh = False
+
         while True:
             await asyncio.sleep(0.1)
+
+            nodes = self.model.nodes.items()
+            listw = self.listwalker.contents
             evloop.call_soon(loop.draw_screen)
 
-            for index, item in enumerate(self.listwalker.contents):
-                live_nodes.append(item.node_name)
-                if item.session == "inbound-slot":
-                    live_inbound.append(item.i)
-
-            # Draw get_info(). Called only once.
-            for node_name, info in self.model.nodes.items():
-                if node_name in live_nodes:
-                    continue
-                self.draw_info(node_name, info)
-
-            # TODO: when RPC can't connect, display the node as offline.
-
-            # If a node goes offline, trigger a redraw.
-            for node_name, info in self.model.nodes.items():
-                if not bool(info):
-                    if node_name in dead_nodes:
-                        continue
-                    dead_nodes.append(node_name)
-                    self.listwalker.contents.clear()
-                    self.draw_empty(node_name, info)
-                    for name, info in self.model.nodes.items():
-                        if name not in dead_nodes:
-                            self.draw_info(name, info)
-
-            # Only render info if the node is online.
-            self.fill_left_box()
-            self.fill_right_box()
-
-            # If a new inbound comes online, trigger a redraw.
-            for key in info['inbound'].keys():
-                if key not in live_inbound:
-                    self.listwalker.contents.clear()
-                    for name, info in self.model.nodes.items():
-                        self.draw_info(name, info)
-
-                # If an inbound goes offline, trigger a redraw.
-                addr = info['inbound'].get(key)
-                if not bool(addr):
-                    if key in dead_inbound:
-                        continue
-                    dead_inbound.append(key)
-                    self.listwalker.contents.clear()
-                    for name, info in self.model.nodes.items():
-                        if name not in dead_nodes:
-                            self.draw_info(name, info)
+            for index, item in enumerate(listw):
+                # Keep track of known nodes.
+                if item.node_name not in known_nodes:
+                    known_nodes.append(item.node_name)
+                # Keep track of known inbounds.
+                if (item.session == "inbound-slot"
+                        and item.i not in known_inbound):
+                    known_inbound.append(item.i)
+                # Keep track of known outbounds.
+                if (item.session == "outbound-slot"
+                        and item.id not in known_outbound
+                        and not item.id == 0):
+                    known_outbound.append(item.id)
+
+            for name, info in nodes:
+                # 1. Sort nodes into lists.
+                if bool(info) and name not in live_nodes:
+                    live_nodes.append(name)
+                if not bool(info) and name not in dead_nodes:
+                    dead_nodes.append(name)
+                if bool(info) and name in dead_nodes:
+                    logging.debug("Refresh: dead node online.")
+                    refresh = True
+                if not bool(info) and name in live_nodes:
+                    logging.debug("Refresh: online node offline.")
+                    refresh = True
+
+                # 2. Display nodes according to list.
+                if name in live_nodes and name not in known_nodes:
+                    self.draw_info(name, info)
+                if name in dead_nodes and name not in known_nodes:
+                    self.draw_empty(name, info)
+                if refresh:
+                    logging.debug("Refresh initiated.")
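+                    # Drop all cached state and clear the list walker; the
+                    # next loop iteration redraws every node from scratch.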
+                    await asyncio.sleep(0.1)
+                    known_outbound.clear()
+                    known_inbound.clear()
+                    known_nodes.clear()
+                    live_nodes.clear()
+                    dead_nodes.clear()
+                    refresh = False
+                    listw.clear()
+                    logging.debug("Refresh complete.")
+
+                # 3. Handle events on nodes we know.
+                if bool(info) and name in known_nodes:
+                    self.fill_left_box()
+                    self.fill_right_box()
+                    if 'inbound' in info:
+                        for key in info['inbound'].keys():
+                            # New inbound online.
+                            if key not in known_inbound:
+                                addr = info['inbound'].get(key)
+                                if bool(addr):
+                                    logging.debug(f"Refresh: inbound {key} online")
+                                    refresh = True
+                        # Known inbound offline.
+                        for key in known_inbound:
+                            addr = info['inbound'].get(key)
+                            if bool(addr):
+                                continue
+                            logging.debug(f"Refresh: inbound {key} offline")
+                            refresh = True
+                    # New outbound online.
+                    if 'outbound' in info:
+                        for i, slot_info in info['outbound'].items():
+                            addr = slot_info[0]
+                            id = slot_info[1]
+                            if id == 0:
+                                continue
+                            if id in known_outbound:
+                                continue
+                            logging.debug(f"Outbound {i} came online.")
+                            refresh = True
diff --git a/doc/src/dev/contribute.md b/doc/src/dev/contribute.md
index e2bcdd37aee3..db6173d9c0ae 100644
--- a/doc/src/dev/contribute.md
+++ b/doc/src/dev/contribute.md
@@ -11,10 +11,10 @@ identify issues and areas of improvement.
 
 Contribute according to your own interests, skills, and topics in which you
 would like to become more knowledgable. Take initiative. Other darkfi devs can help you
-as mentors: see [the Methodology section of the Study Guide](https://darkrenaissance.github.io/darkfi/development/learn.html#methodology).
+as mentors: see [the Methodology section of the Study Guide](https://darkrenaissance.github.io/darkfi/dev/learn.html#methodology).
 
 Few people are able be an expert in all domains. Choose a topic and specialize.
-Example specializations are described [here](https://darkrenaissance.github.io/darkfi/development/learn.html#branches).
+Example specializations are described [here](https://darkrenaissance.github.io/darkfi/dev/learn.html#branches).
 Don't make the mistake that you must become an expert in all areas before
 getting started. It's best to just jump in.
 
@@ -34,7 +34,7 @@ There are several areas of work that are either undergoing maintenance or need
 to be maintained:
 
 * **Documentation:** general documentation and code docs (cargo doc). This is a very
-  important work for example [overview](https://darkrenaissance.github.io/darkfi/architecture/overview.html)
+  important work for example [overview](https://darkrenaissance.github.io/darkfi/arch/overview.html)
   page is out of date.
 * **Tooling:** Such as the `drk` tool. right now we're adding [DAO functionality](https://github.com/darkrenaissance/darkfi/blob/master/src/contract/dao/wallet.sql)
diff --git a/src/zk/debug.rs b/src/zk/debug.rs
index 62c36a4670cf..c98b7760a3ae 100644
--- a/src/zk/debug.rs
+++ b/src/zk/debug.rs
@@ -1,3 +1,21 @@
+/* This file is part of DarkFi (https://dark.fi)
+ *
+ * Copyright (C) 2020-2023 Dyne.org foundation
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
 use darkfi_sdk::pasta::pallas;
 use log::error;