From 34e0946aa461c6042bdcd0aee354517211e1a116 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Sun, 12 May 2024 23:20:45 +0200 Subject: [PATCH 01/44] task-manager: dump fight vs sqlite --- .gitignore | 4 + Cargo.toml | 4 + task_manager/Cargo.toml | 19 ++ task_manager/src/lib.rs | 546 +++++++++++++++++++++++++++++++++++++ task_manager/tests/main.rs | 49 ++++ 5 files changed, 622 insertions(+) create mode 100644 task_manager/Cargo.toml create mode 100644 task_manager/src/lib.rs create mode 100644 task_manager/tests/main.rs diff --git a/.gitignore b/.gitignore index 8be9c66e9..36c79dd61 100644 --- a/.gitignore +++ b/.gitignore @@ -38,6 +38,10 @@ target/ # MSVC Windows builds of rustc generate these, which store debugging information *.pdb +# SQLite +# ----------------------------------------------------------------------------------------- +*.sqlite + # Temp files, swap, debug, log, perf, cache # ----------------------------------------------------------------------------------------- *.swp diff --git a/Cargo.toml b/Cargo.toml index 0721e12d2..6a999196f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "provers/sgx/guest", "provers/sgx/setup", "pipeline", + "task_manager" ] # Always optimize; building and running the guest takes much longer without optimization. @@ -148,6 +149,9 @@ anyhow = "1.0" thiserror = "1.0" thiserror-no-std = "2.0.2" +# SQLite +rusqlite = { version = "0.31.0", features = ["bundled"] } + # misc hashbrown = { version = "0.14", features = ["inline-more"] } tempfile = "3.8" diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml new file mode 100644 index 000000000..967847b9e --- /dev/null +++ b/task_manager/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "task_manager" +version = "0.1.0" +authors = ["Mamy Ratsimbazafy "] +edition = "2021" # { workspace = true } + +[dependencies] +raiko-primitives = { workspace = true } +rusqlite = { workspace = true } + +[dev-dependencies] +rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::() +rand_chacha = "0.9.0-alpha.1" +tempfile = "3.10.1" +alloy-primitives = { workspace = true, features = ["getrandom"] } + +[[test]] +name = "task_manager_tests" +path = "tests/main.rs" diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs new file mode 100644 index 000000000..4b4ab1477 --- /dev/null +++ b/task_manager/src/lib.rs @@ -0,0 +1,546 @@ +// Raiko +// Copyright (c) 2024 Taiko Labs +// Licensed and distributed under either of +// * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). +// * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). +// at your option. This file may not be copied, modified, or distributed except according to those terms. + +//! # Raiko Task Manager +//! +//! At the moment (Apr '24) proving requires a significant amount of time +//! and maintaining a connection with a potentially external party. +//! +//! By design Raiko is stateless, it prepares inputs and forward to the various proof systems. +//! However some proving backend like Risc0's Bonsai are also stateless, +//! and only accepts proofs and return result. +//! Hence to handle crashes, networking losses and restarts, we need to persist +//! the status of proof requests, task submitted, proof received, proof forwarded. +//! +//! In the diagram: +//! _____________ ______________ _______________ +//! Taiko L2 -> | Taiko-geth | ======> | Raiko-host | =========> | Raiko-guests | +//! 
| Taiko-reth | | | | Risc0 | +//! |____________| |_____________| | SGX | +//! | SP1 | +//! |______________| +//! _____________________________ +//! =========> | Prover Networks | +//! | Risc0's Bonsai | +//! | Succinct's Prover Network | +//! |____________________________| +//! _________________________ +//! =========> | Raiko-dist | +//! | Distributed Risc0 | +//! | Distributed SP1 | +//! |_______________________| +//! +//! We would position Raiko task manager either before Raiko-host or after Raiko-host. +//! +//! ## Implementation +//! +//! The task manager is a set of tables and KV-stores. +//! - Keys for table joins are prefixed with id +//! - KV-stores for (almost) immutable data +//! - KV-store for large inputs and indistinguishable from random proofs +//! - Tables for tasks and their metadata. +//! - Prefixed with rts_ in-case the DB is co-located with other services. +//! +//! __________________________ +//! | metadata | +//! |_________________________| A simple KV-store with the DB version for migration/upgrade detection. +//! | Key | Value | Future version may add new fields, without breaking older versions. +//! |_________________|_______| +//! | task_db_version | 0 | +//! |_________________|_______| +//! +//! ________________________ +//! | Proof systems | +//! |______________________| A map: ID -> proof systems +//! | id_proofsys | Desc | +//! |_____________|________| +//! | 0 | Risc0 | (0 for Risc0 and 1 for SP1 is intentional) +//! | 1 | SP1 | +//! | 2 | SGX | +//! |_____________|________| +//! +//! _________________________________________________ +//! | Task Status code | +//! |________________________________________________| +//! | id_status | Desc | +//! |_____________|__________________________________| +//! | 0 | Success | +//! | 100 | Success but pruned | +//! | 1000 | Work-in-progress | +//! | | | +//! | -1000 | Proof failure (prover - generic) | +//! | -1100 | Proof failure (OOM) | +//! | | | +//! | -2000 | Network failure | +//! | | | +//! | -3000 | Cancelled | +//! | -3100 | Cancelled (never started) | +//! | -3200 | Cancelled (aborted) | +//! | -3210 | Cancellation in progress | (Yes -3210 is intentional ;)) +//! | | | +//! | -4000 | Invalid or unsupported block | +//! | | | +//! | -9999 | Unspecified failure reason | +//! |_____________|__________________________________| +//! +//! Rationale: +//! - Convention, failures use negative status code. +//! - We leave space for new status codes +//! - -X000 status code are for generic failures segregated by failures: +//! on the networking side, the prover side or trying to prove an invalid block. +//! +//! A catchall -9999 error code is provided if a failure is not due to +//! either the network, the prover or the requester invalid block. +//! They should not exist in the DB and a proper analysis +//! and eventually status code should be assigned. +//! +//! ____________________________ +//! | Proof cache | A map: ID -> proof +//! |___________________________| +//! | id_proof | proof_value | +//! |__________|________________| A Groth16 proof is 2G₁+1G₂ elements +//! | 0 | 0xabcd...6789 | On BN254: 2*(2*32)+1*(2*2*32) = 256 bytes +//! | 1 | 0x1234...cdef | +//! | ... | ... | A SGX proof is ... +//! |__________|________________| A Stark proof (not wrapped in Groth16) would be several kilobytes +//! +//! Do we need pruning? +//! There are 60s * 60min * 24h * 30j = 2592000s in a month +//! dividing by 12, that's 216000 Ethereum slots. +//! 
Assuming 1kB of proofs per block (Stark-to-Groth16 Risc0 & SP1 + SGX, SGX size to be verified) +//! That's only 216MB per month. +//! +//! _____________________________________________________________________________________________ +//! | Tasks metadata | +//! |_____________________________________________________________________________________________| +//! | id_task | chainID | block_number | blockhash | parentHash | stateRoot | # of txs | gas_used | +//! |_________|_________|______________|___________|____________|___________|__________|__________| +//! ___________________________________________________________ +//! | Task queue | +//! |__________________________________________________________| +//! | id_task | blockhash | id_proofsys | id_status | id_proof | +//! |_________|___________|_____________|___________|__________| +//! ______________________________________ +//! | Tasks inputs | +//! |_____________________________________| +//! | id_task | inputs (serialized) | +//! |_________|___________________________| +//! _____________________________________ +//! | Task requests | +//! |____________________________________| +//! | id_task | id_submitter | submit_dt | +//! |_________|______________|___________| +//! ______________________________________ +//! | Task fulfillment | +//! |_____________________________________| +//! | id_task | id_fulfiller | fulfill_dt | +//! |_________|______________|____________| +//! +//! Rationale: +//! - When dealing with proof requests we don't need to touch the fullfillment table +//! - and inversely when dealing with provers, we don't need to deal with the request table. +//! - inputs are very large and warrant a dedicated table, with pruning +//! - metadata is useful to audit block building and prover efficiency + +// Imports +// ---------------------------------------------------------------- +use rusqlite::Error as SqlError; +use std::io::{Error as IOError, ErrorKind as IOErrorKind}; + +use std::fs::File; +use std::path::Path; + +use raiko_primitives::{BlockNumber, ChainId, B256}; + +use rusqlite::{named_params, params, Statement}; +use rusqlite::{Connection, OpenFlags}; + +// Types +// ---------------------------------------------------------------- + +#[derive(PartialEq, Debug)] +pub enum TaskManagerError { + IOError(IOErrorKind), + SqlError(String), +} + +impl From for TaskManagerError { + fn from(error: IOError) -> TaskManagerError { + TaskManagerError::IOError(error.kind()) + } +} + +impl From for TaskManagerError { + fn from(error: SqlError) -> TaskManagerError { + TaskManagerError::SqlError(error.to_string()) + } +} + +#[derive(Debug)] +pub struct TaskDb { + conn: Connection, +} + +#[derive(Debug)] +pub struct TaskManager<'db> { + enqueue_task: Statement<'db>, + // dequeue_task: Statement<'db>, + // get_block_proof_status: Statement<'db>, +} + +pub enum TaskProofsys { + Risc0 = 0, + SP1 = 1, + SGX = 2, +} + +#[allow(non_camel_case_types)] +#[rustfmt::skip] +pub enum TaskStatus { + Success = 0, + SuccessButPruned = 100, + WorkInProgress = 1000, + ProofFailure_Generic = -1000, + ProofFailure_OutOfMemory = -1100, + NetworkFailure = -2000, + Cancelled = -3000, + Cancelled_NeverStarted = -3100, + Cancelled_Aborted = -3200, + CancellationInProgress = -3210, + InvalidOrUnsupportedBlock = -4000, + UnspecifiedFailureReason = -9999, +} + +// Implementation +// ---------------------------------------------------------------- + +impl TaskDb { + fn open(path: &Path) -> Result { + let conn = Connection::open_with_flags(path, 
OpenFlags::SQLITE_OPEN_READ_WRITE)?; + conn.pragma_update(None, "foreign_keys", true)?; + conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?; + conn.pragma_update(None, "journal_mode", "WAL")?; + conn.pragma_update(None, "synchronous", "NORMAL")?; + conn.pragma_update(None, "temp_store", "MEMORY")?; + Ok(conn) + } + + fn create(path: &Path) -> Result { + let _file = File::options() + .write(true) + .read(true) + .create_new(true) + .open(path)?; + + let conn = Self::open(path)?; + Self::create_tables(&conn)?; + + Ok(conn) + } + + /// Open an existing TaskDb database at "path" + /// If a database does not exist at the path, one is created. + pub fn open_or_create(path: &Path) -> Result { + let conn = if path.exists() { + Self::open(path) + } else { + Self::create(path) + }?; + Ok(Self { conn }) + } + + // Queries + // ---------------------------------------------------------------- + + fn create_tables(conn: &Connection) -> Result<(), TaskManagerError> { + conn.execute( + "CREATE TABLE metadata( + key BLOB NOT NULL PRIMARY KEY, + value BLOB + )", + params![], + )?; + conn.execute( + "INSERT INTO + metadata(key, value) + VALUES + (?, ?);", + params!["task_db_version", 0u32], + )?; + + conn.execute( + "CREATE TABLE proofsys( + id_proofsys INTEGER NOT NULL PRIMARY KEY, + desc TEXT NOT NULL + )", + params![], + )?; + conn.execute( + "INSERT INTO + proofsys(id_proofsys, desc) + VALUES + (0, 'Risc0'), + (1, 'SP1'), + (2, 'SGX');", + params![], + )?; + + conn.execute( + "CREATE TABLE status_codes( + id_status INTEGER NOT NULL PRIMARY KEY, + desc TEXT NOT NULL + )", + params![], + )?; + conn.execute( + "INSERT INTO + status_codes(id_status, desc) + VALUES + ( 0, 'Success'), + ( 100, 'Success but pruned'), + ( 1000, 'Work-in-progress'), + (-1000, 'Proof failure (generic)'), + (-1100, 'Proof failure (Out-Of-Memory)'), + (-2000, 'Network failure'), + (-3000, 'Cancelled'), + (-3100, 'Cancelled (never started)'), + (-3200, 'Cancelled (aborted)'), + (-3210, 'Cancellation in progress'), + (-4000, 'Invalid or unsupported block'), + (-9999, 'Unspecified failure reason');", + params![], + )?; + + conn.execute( + "CREATE TABLE proofs( + id_proof INTEGER NOT NULL PRIMARY KEY, + value BLOB NOT NULL + )", + params![], + )?; + + // Notes: + // 1. a blockhash may appear as many times as there are prover backends. + // 2. For query speed over (chainID, blockhash, id_proofsys) + // there is no need to create an index as the UNIQUE constraint + // has an implied index, see: + // - https://sqlite.org/lang_createtable.html#uniqueconst + // - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices + conn.execute( + "CREATE TABLE taskqueue( + id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, + chainID INTEGER NOT NULL, + blockhash BLOB NOT NULL, + id_proofsys INTEGER NOT NULL, + id_status INTEGER NOT NULL, + id_proof INTEGER, + FOREIGN KEY(chainID, blockhash) REFERENCES blocks(chainID, blockhash) + FOREIGN KEY(id_proofsys) REFERENCES proofsys(id_proofsys) + FOREIGN KEY(id_status) REFERENCES status_codes(id_status) + FOREIGN KEY(id_proof) REFERENCES proofs(id_proof) + UNIQUE (chainID, blockhash, id_proofsys) + )", + params![], + )?; + // Different blockchains might have the same blockhash in case of a fork + // for example Ethereum and Ethereum Classic. + // As "GuestInput" refers to ChainID, the proving task would be different. 
+ conn.execute( + "CREATE TABLE blocks( + chainID INTEGER NOT NULL, + blockhash BLOB NOT NULL, + block_number INTEGER NOT NULL, + parentHash BLOB NOT NULL, + stateRoot BLOB NOT NULL, + num_transactions INTEGER NOT NULL, + gas_used INTEGER NOT NULL, + PRIMARY KEY (chainID, blockhash) + )", + params![], + )?; + // Payloads will be very large, 1.77MB on L1 in Jan 2024 (Before EIP-4844 blobs), + // https://ethresear.ch/t/on-block-sizes-gas-limits-and-scalability/18444 + // mandating ideally a separated high-performance KV-store to reduce IO. + conn.execute( + "CREATE TABLE task_payloads( + id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, + payload BLOB NOT NULL, + FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) + )", + params![], + )?; + conn.execute( + "CREATE TABLE task_requests( + id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, + submitter TEXT NOT NULL, + submit_date TEXT NOT NULL, + FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) + )", + params![], + )?; + conn.execute( + "CREATE TABLE task_fulfillment( + id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, + fulfiller TEXT NOT NULL, + fulfill_date TEXT NOT NULL, + FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) + )", + params![], + )?; + + Ok(()) + } + + pub fn manage<'db>(&'db self) -> Result, TaskManagerError> { + // To update all the tables with the task_id assigned by Sqlite + // we require row IDs for the taskqueue table + // and we use last_insert_rowid() which is not reentrant and need a transaction lock + // and store them in a temporary table, configured to be in-memory. + // + // Alternative approaches considered: + // 1. Sqlite does not support variables (because it's embedded and significantly less overhead than other SQL "Client-Server" DBs). + // 2. using AUTOINCREMENT and/or the sqlite_sequence table + // - sqlite recommends not using AUTOINCREMENT for performance + // https://www.sqlite.org/autoinc.html + // 3. INSERT INTO ... RETURNING nested in a WITH clause (CTE / Common Table Expression) + // - Sqlite can only do RETURNING to the application, it cannot be nested in another query or diverted to another table + // https://sqlite.org/lang_returning.html#limitations_and_caveats + // 4. CREATE TEMPORARY TABLE AS with an INSERT INTO ... RETURNING nested + // - Same limitation AND CREATE TABLEAS seems to only support SELECT statements (but if we could nest RETURNING we can workaround that + // https://www.sqlite.org/lang_createtable.html#create_table_as_select_statements + // 5. Views + trigger on view inserts + // This introduces state beyond just the DB tables. + // Furthermore we would still need a transaction and last_insert_rowid() anyway + // + // Hence we have to use row IDs and last_insert_rowid() + // + // Now as a last boss, bindings via params! or named_params! is broken with multi-statements. + // Only the first statement is taken into account. + // + // i.e if 2 INSERTs, only parameters from the first one are counted. + // If DROP temp.table then INSERT, no parameters is counted. + // If BEGIN TRANSACTION; then INSERT, no parameters is counted. + // + // Hence we require exclusive DB locking, single connection, single thread. + // + // Then we insert first in a temporary table. + // That table must be created beforehand and cleared after each transaction, + // so that the INSERT INTO is the very first statement. 
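+        //
+        // A hedged illustration of the binding pitfall described above (the table and
+        // parameter names here are made up; behaviour as observed by the author):
+        //
+        //     let stmt = conn.prepare("INSERT INTO a VALUES (:x); INSERT INTO b VALUES (:y);")?;
+        //     assert_eq!(stmt.parameter_count(), 1); // only :x is visible, :y is silently ignored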
+ + self.conn.execute_batch( + " + -- PRAGMA temp_store = 'MEMORY'; + DROP TABLE IF EXISTS temp.current_task; + + CREATE TEMPORARY TABLE temp.current_task( + id_task INTEGER, + chainID INTEGER, + blockhash BLOB, + id_proofsys INTEGER, + id_status INTEGER, + payload BLOB, + submitter TEXT + ); + ")?; + + let enqueue_task = self.conn.prepare( + " + INSERT INTO temp.current_task(chainID, blockhash, id_proofsys, id_status, payload, submitter) + VALUES (:chainID, :blockhash, :id_proofsys, :id_status, :payload, :submitter); + + INSERT INTO taskqueue(chainID, blockhash, id_proofsys, id_status) + SELECT chainID, blockhash, id_proofsys, id_status FROM temp.current_task; + + UPDATE temp.current_task + SET id_task = last_insert_rowid(); + + INSERT INTO task_payloads(id_task, payload) + SELECT id_task, payload from temp.current_task + LIMIT 1; + + INSERT INTO task_requests(id_task, submitter, submit_date) + SELECT id_task, submitter, datetime('now') from temp.current_task + LIMIT 1; + + DELETE FROM temp.current_task; + ", + ).unwrap(); + + // println!("param count: {:?}", enqueue_task.parameter_count()); + + // println!("chainID: {:?}", enqueue_task.parameter_index(":chainID")); + // println!("blockhash: {:?}", enqueue_task.parameter_index(":blockhash")); + // println!("id_proofsys: {:?}", enqueue_task.parameter_index(":id_proofsys")); + // println!("id_status: {:?}", enqueue_task.parameter_index(":id_status")); + // println!("payload: {:?}", enqueue_task.parameter_index(":payload")); + // println!("submitter: {:?}", enqueue_task.parameter_index(":submitter")); + + // println!("example: {:?}", enqueue_task.parameter_index(":example")); + + Ok(TaskManager { enqueue_task }) + } +} + +impl<'db> TaskManager<'db> { + pub fn enqueue_task( + &mut self, + chain_id: ChainId, + blockhash: B256, + proof_system: TaskProofsys, + payload: &[u8], + submitter: &str, + ) -> Result<(), TaskManagerError> { + + println!("{}", self.enqueue_task.expanded_sql().unwrap()); + + let status = TaskStatus::WorkInProgress; + + self.enqueue_task.execute(named_params! { + ":chainID": chain_id as u64, + ":blockhash": blockhash.as_slice(), + ":id_proofsys": proof_system as u8, + ":id_status": status as u8, + ":payload": payload, + ":submitter": submitter, + })?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + // We only test private functions here. + // Public API will be tested in a dedicated tests folder + + use super::*; + use tempfile::tempdir; + + #[test] + fn error_on_missing() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + assert!(TaskDb::open(&file).is_err()); + } + + #[test] + fn ensure_exclusive() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + + let _db = TaskDb::create(&file).unwrap(); + assert!(TaskDb::open(&file).is_err()); + } + + #[test] + fn ensure_unicity() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + + let _db = TaskDb::create(&file).unwrap(); + assert!(TaskDb::create(&file).is_err()); + } +} diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs new file mode 100644 index 000000000..17a38f617 --- /dev/null +++ b/task_manager/tests/main.rs @@ -0,0 +1,49 @@ +// Raiko +// Copyright (c) 2024 Taiko Labs +// Licensed and distributed under either of +// * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). +// * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). +// at your option. 
This file may not be copied, modified, or distributed except according to those terms. + +#[cfg(test)] +mod tests { + use std::path::Path; + use std::fs; + + use rand::{Rng, SeedableRng}; + use rand_chacha::ChaCha8Rng; + use tempfile::tempdir; + + use raiko_primitives::B256; + use task_manager::{TaskDb, TaskManager, TaskManagerError, TaskProofsys, TaskStatus}; + + #[test] + fn test_enqueue_task() { + let dir = std::env::current_dir().unwrap().join("tests"); + let file = dir.as_path().join("test_enqueue_task.sqlite"); + if file.exists() { + fs::remove_file(&file).unwrap() + }; + + + let db = TaskDb::open_or_create(&file).unwrap(); + let mut tama = TaskDb::manage(&db).unwrap(); + + let mut rng = ChaCha8Rng::seed_from_u64(123); + + let chain_id = 100; + let blockhash = B256::random(); + let proofsys = TaskProofsys::Risc0; + let payload_length = rng.gen_range(20..200); + let payload: Vec = rng.gen_iter::().take(payload_length).collect(); + let submitter = "test_enqueue_task"; + + tama.enqueue_task( + chain_id, + blockhash, + proofsys, + &payload, + submitter + ).unwrap(); + } +} From b418922ed6e89724b9233e40342b44451c590e55 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Mon, 13 May 2024 14:32:10 +0200 Subject: [PATCH 02/44] task-manager: SQL tables+views+triggers success - but arguments passed in execute are 'NULL' --- task_manager/src/lib.rs | 196 +++++++++++++++++++++---------------- task_manager/tests/main.rs | 16 ++- 2 files changed, 126 insertions(+), 86 deletions(-) diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 4b4ab1477..84d3f8c8d 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -69,7 +69,6 @@ //! | id_status | Desc | //! |_____________|__________________________________| //! | 0 | Success | -//! | 100 | Success but pruned | //! | 1000 | Work-in-progress | //! | | | //! | -1000 | Proof failure (prover - generic) | @@ -117,7 +116,7 @@ //! _____________________________________________________________________________________________ //! | Tasks metadata | //! |_____________________________________________________________________________________________| -//! | id_task | chainID | block_number | blockhash | parentHash | stateRoot | # of txs | gas_used | +//! | id_task | chain_id | block_number | blockhash | parent_hash | state_root | # of txs | gas_used | //! |_________|_________|______________|___________|____________|___________|__________|__________| //! ___________________________________________________________ //! 
| Task queue | @@ -202,7 +201,6 @@ pub enum TaskProofsys { #[rustfmt::skip] pub enum TaskStatus { Success = 0, - SuccessButPruned = 100, WorkInProgress = 1000, ProofFailure_Generic = -1000, ProofFailure_OutOfMemory = -1100, @@ -261,7 +259,7 @@ impl TaskDb { "CREATE TABLE metadata( key BLOB NOT NULL PRIMARY KEY, value BLOB - )", + );", params![], )?; conn.execute( @@ -276,7 +274,7 @@ impl TaskDb { "CREATE TABLE proofsys( id_proofsys INTEGER NOT NULL PRIMARY KEY, desc TEXT NOT NULL - )", + );", params![], )?; conn.execute( @@ -293,7 +291,7 @@ impl TaskDb { "CREATE TABLE status_codes( id_status INTEGER NOT NULL PRIMARY KEY, desc TEXT NOT NULL - )", + );", params![], )?; conn.execute( @@ -301,7 +299,6 @@ impl TaskDb { status_codes(id_status, desc) VALUES ( 0, 'Success'), - ( 100, 'Success but pruned'), ( 1000, 'Work-in-progress'), (-1000, 'Proof failure (generic)'), (-1100, 'Proof failure (Out-Of-Memory)'), @@ -319,13 +316,13 @@ impl TaskDb { "CREATE TABLE proofs( id_proof INTEGER NOT NULL PRIMARY KEY, value BLOB NOT NULL - )", + );", params![], )?; // Notes: // 1. a blockhash may appear as many times as there are prover backends. - // 2. For query speed over (chainID, blockhash, id_proofsys) + // 2. For query speed over (chain_id, blockhash, id_proofsys) // there is no need to create an index as the UNIQUE constraint // has an implied index, see: // - https://sqlite.org/lang_createtable.html#uniqueconst @@ -333,17 +330,17 @@ impl TaskDb { conn.execute( "CREATE TABLE taskqueue( id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, - chainID INTEGER NOT NULL, + chain_id INTEGER NOT NULL, blockhash BLOB NOT NULL, id_proofsys INTEGER NOT NULL, id_status INTEGER NOT NULL, id_proof INTEGER, - FOREIGN KEY(chainID, blockhash) REFERENCES blocks(chainID, blockhash) + FOREIGN KEY(chain_id, blockhash) REFERENCES blocks(chain_id, blockhash) FOREIGN KEY(id_proofsys) REFERENCES proofsys(id_proofsys) FOREIGN KEY(id_status) REFERENCES status_codes(id_status) FOREIGN KEY(id_proof) REFERENCES proofs(id_proof) - UNIQUE (chainID, blockhash, id_proofsys) - )", + UNIQUE (chain_id, blockhash, id_proofsys) + );", params![], )?; // Different blockchains might have the same blockhash in case of a fork @@ -351,15 +348,15 @@ impl TaskDb { // As "GuestInput" refers to ChainID, the proving task would be different. conn.execute( "CREATE TABLE blocks( - chainID INTEGER NOT NULL, + chain_id INTEGER NOT NULL, blockhash BLOB NOT NULL, block_number INTEGER NOT NULL, - parentHash BLOB NOT NULL, - stateRoot BLOB NOT NULL, + parent_hash BLOB NOT NULL, + state_root BLOB NOT NULL, num_transactions INTEGER NOT NULL, gas_used INTEGER NOT NULL, - PRIMARY KEY (chainID, blockhash) - )", + PRIMARY KEY (chain_id, blockhash) + );", params![], )?; // Payloads will be very large, 1.77MB on L1 in Jan 2024 (Before EIP-4844 blobs), @@ -370,7 +367,7 @@ impl TaskDb { id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, payload BLOB NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - )", + );", params![], )?; conn.execute( @@ -379,7 +376,7 @@ impl TaskDb { submitter TEXT NOT NULL, submit_date TEXT NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - )", + );", params![], )?; conn.execute( @@ -388,7 +385,7 @@ impl TaskDb { fulfiller TEXT NOT NULL, fulfill_date TEXT NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - )", + );", params![], )?; @@ -412,74 +409,97 @@ impl TaskDb { // 4. CREATE TEMPORARY TABLE AS with an INSERT INTO ... 
RETURNING nested // - Same limitation AND CREATE TABLEAS seems to only support SELECT statements (but if we could nest RETURNING we can workaround that // https://www.sqlite.org/lang_createtable.html#create_table_as_select_statements - // 5. Views + trigger on view inserts - // This introduces state beyond just the DB tables. - // Furthermore we would still need a transaction and last_insert_rowid() anyway // // Hence we have to use row IDs and last_insert_rowid() // - // Now as a last boss, bindings via params! or named_params! is broken with multi-statements. - // Only the first statement is taken into account. + // Furthermore we use a view and an INSTEAD OF trigger to update the tables, + // the alternative being // - // i.e if 2 INSERTs, only parameters from the first one are counted. - // If DROP temp.table then INSERT, no parameters is counted. - // If BEGIN TRANSACTION; then INSERT, no parameters is counted. + // 5. Direct insert into tables + // This does not work as SQLite `execute` and `prepare` + // only process the first statement. // - // Hence we require exclusive DB locking, single connection, single thread. - // - // Then we insert first in a temporary table. - // That table must be created beforehand and cleared after each transaction, - // so that the INSERT INTO is the very first statement. + // And lastly, we need the view and trigger to be temporary because + // otherwise they can't access the temporary table: + // 6. https://sqlite.org/forum/info/4f998eeec510bceee69404541e5c9ca0a301868d59ec7c3486ecb8084309bba1 + // "Triggers in any schema other than temp may only access objects in their own schema. However, triggers in temp may access any object by name, even cross-schema." - self.conn.execute_batch( + let conn = &self.conn; + conn.execute_batch( " -- PRAGMA temp_store = 'MEMORY'; - DROP TABLE IF EXISTS temp.current_task; - - CREATE TEMPORARY TABLE temp.current_task( - id_task INTEGER, - chainID INTEGER, - blockhash BLOB, - id_proofsys INTEGER, - id_status INTEGER, - payload BLOB, - submitter TEXT - ); - ")?; - - let enqueue_task = self.conn.prepare( - " - INSERT INTO temp.current_task(chainID, blockhash, id_proofsys, id_status, payload, submitter) - VALUES (:chainID, :blockhash, :id_proofsys, :id_status, :payload, :submitter); - - INSERT INTO taskqueue(chainID, blockhash, id_proofsys, id_status) - SELECT chainID, blockhash, id_proofsys, id_status FROM temp.current_task; - - UPDATE temp.current_task - SET id_task = last_insert_rowid(); - - INSERT INTO task_payloads(id_task, payload) - SELECT id_task, payload from temp.current_task - LIMIT 1; - - INSERT INTO task_requests(id_task, submitter, submit_date) - SELECT id_task, submitter, datetime('now') from temp.current_task - LIMIT 1; - DELETE FROM temp.current_task; - ", - ).unwrap(); - - // println!("param count: {:?}", enqueue_task.parameter_count()); - - // println!("chainID: {:?}", enqueue_task.parameter_index(":chainID")); - // println!("blockhash: {:?}", enqueue_task.parameter_index(":blockhash")); - // println!("id_proofsys: {:?}", enqueue_task.parameter_index(":id_proofsys")); - // println!("id_status: {:?}", enqueue_task.parameter_index(":id_status")); - // println!("payload: {:?}", enqueue_task.parameter_index(":payload")); - // println!("submitter: {:?}", enqueue_task.parameter_index(":submitter")); - - // println!("example: {:?}", enqueue_task.parameter_index(":example")); + CREATE TEMPORARY VIEW temp.enqueue_task AS + SELECT + tq.id_task, + tq.chain_id, + tq.blockhash, + tq.id_proofsys, + tq.id_status, + 
tr.submitter, + b.block_number, + b.parent_hash, + b.state_root, + b.num_transactions, + b.gas_used, + tp.payload + FROM + taskqueue tq + LEFT JOIN + blocks b on ( + b.chain_id = tq.chain_id + AND b.blockhash = tq.blockhash + ) + LEFT JOIN + task_payloads tp on tp.id_task = tq.id_task + LEFT JOIN + task_requests tr on tr.id_task = tq.id_task; + + CREATE TEMPORARY TABLE temp.current_task(id_task INTEGER); + + CREATE TEMPORARY TRIGGER enqueue_task_insert_trigger + INSTEAD OF INSERT ON enqueue_task + BEGIN + + INSERT INTO blocks(chain_id, blockhash, block_number, parent_hash, state_root, num_transactions, gas_used) + VALUES (new.chain_id, new.blockhash, new.block_number, new.parent_hash, new.state_root, new.num_transactions, new.gas_used); + + INSERT INTO taskqueue(chain_id, blockhash, id_proofsys, id_status) + VALUES (new.chain_id, new.blockhash, new.id_proofsys, new.id_status); + + INSERT INTO current_task + SELECT id_task FROM taskqueue + WHERE rowid = last_insert_rowid() + LIMIT 1; + + INSERT INTO task_payloads(id_task, payload) + SELECT tmp.id_task, new.payload + FROM current_task tmp + LIMIT 1; + + INSERT INTO task_requests(id_task, submitter, submit_date) + SELECT tmp.id_task, new.submitter, datetime('now') + FROM current_task tmp + LIMIT 1; + + DELETE FROM current_task; + END; + ")?; + + let enqueue_task = conn.prepare( + " + INSERT INTO temp.enqueue_task( + chain_id, blockhash, id_proofsys, id_status, + payload, submitter, + block_number, parent_hash, state_root, + num_transactions, gas_used, + payload) + VALUES (:chain_id, :blockhash, :id_proofsys, :id_status, + :payload, :submitter, + :block_number, :parent_hash, :state_root, + :num_transactions, :gas_used, + :payload); + ")?; Ok(TaskManager { enqueue_task }) } @@ -489,10 +509,15 @@ impl<'db> TaskManager<'db> { pub fn enqueue_task( &mut self, chain_id: ChainId, - blockhash: B256, + blockhash: &B256, proof_system: TaskProofsys, payload: &[u8], submitter: &str, + block_number: BlockNumber, + parent_hash: &B256, + state_root: &B256, + num_transactions: u64, + gas_used: u64, ) -> Result<(), TaskManagerError> { println!("{}", self.enqueue_task.expanded_sql().unwrap()); @@ -500,12 +525,17 @@ impl<'db> TaskManager<'db> { let status = TaskStatus::WorkInProgress; self.enqueue_task.execute(named_params! 
{ - ":chainID": chain_id as u64, + ":chain_id": chain_id as u64, ":blockhash": blockhash.as_slice(), ":id_proofsys": proof_system as u8, ":id_status": status as u8, - ":payload": payload, ":submitter": submitter, + ":block_number": block_number, + ":parent_hash": parent_hash.as_slice(), + ":state_root": state_root.as_slice(), + ":num_transactions": num_transactions, + ":gas_used": gas_used, + ":payload": payload, })?; Ok(()) } diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 17a38f617..6dd90974e 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -35,15 +35,25 @@ mod tests { let blockhash = B256::random(); let proofsys = TaskProofsys::Risc0; let payload_length = rng.gen_range(20..200); - let payload: Vec = rng.gen_iter::().take(payload_length).collect(); let submitter = "test_enqueue_task"; + let block_number = rng.gen_range(1..4_000_000); + let parent_hash = B256::random(); + let state_root = B256::random(); + let num_transactions = rng.gen_range(0..1000); + let gas_used = rng.gen_range(0..100_000_000); + let payload: Vec = rng.gen_iter::().take(payload_length).collect(); tama.enqueue_task( chain_id, - blockhash, + &blockhash, proofsys, &payload, - submitter + submitter, + block_number, + &parent_hash, + &state_root, + num_transactions, + gas_used ).unwrap(); } } From 09287962ec356b0b1caa733bf7f1cb395070f278 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Mon, 13 May 2024 15:44:11 +0200 Subject: [PATCH 03/44] task-manager: passing enqueue_task tests --- .gitignore | 2 ++ task_manager/Cargo.toml | 2 ++ task_manager/src/lib.rs | 24 ++++++++++++++---------- task_manager/tests/main.rs | 32 ++++++++++++++++++-------------- 4 files changed, 36 insertions(+), 24 deletions(-) diff --git a/.gitignore b/.gitignore index 36c79dd61..2bff916d5 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,8 @@ target/ # SQLite # ----------------------------------------------------------------------------------------- *.sqlite +*.sqlite-shm +*.sqlite-wal # Temp files, swap, debug, log, perf, cache # ----------------------------------------------------------------------------------------- diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index 967847b9e..4227b1b7f 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -14,6 +14,8 @@ rand_chacha = "0.9.0-alpha.1" tempfile = "3.10.1" alloy-primitives = { workspace = true, features = ["getrandom"] } +rusqlite = { workspace = true, features = ["trace"] } + [[test]] name = "task_manager_tests" path = "tests/main.rs" diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 84d3f8c8d..5d5d92e78 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -392,6 +392,14 @@ impl TaskDb { Ok(()) } + /// Set a tracer to debug SQL execution + /// for example: + /// db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + #[cfg(test)] + pub fn set_tracer(&mut self, trace_fn: Option) { + self.conn.trace(trace_fn); + } + pub fn manage<'db>(&'db self) -> Result, TaskManagerError> { // To update all the tables with the task_id assigned by Sqlite // we require row IDs for the taskqueue table @@ -489,15 +497,11 @@ impl TaskDb { let enqueue_task = conn.prepare( " INSERT INTO temp.enqueue_task( - chain_id, blockhash, id_proofsys, id_status, - payload, submitter, - block_number, parent_hash, state_root, - num_transactions, gas_used, + chain_id, blockhash, id_proofsys, id_status, submitter, + block_number, parent_hash, state_root, num_transactions, gas_used, 
payload) - VALUES (:chain_id, :blockhash, :id_proofsys, :id_status, - :payload, :submitter, - :block_number, :parent_hash, :state_root, - :num_transactions, :gas_used, + VALUES (:chain_id, :blockhash, :id_proofsys, :id_status, :submitter, + :block_number, :parent_hash, :state_root, :num_transactions, :gas_used, :payload); ")?; @@ -511,13 +515,13 @@ impl<'db> TaskManager<'db> { chain_id: ChainId, blockhash: &B256, proof_system: TaskProofsys, - payload: &[u8], submitter: &str, block_number: BlockNumber, parent_hash: &B256, state_root: &B256, num_transactions: u64, gas_used: u64, + payload: &[u8], ) -> Result<(), TaskManagerError> { println!("{}", self.enqueue_task.expanded_sql().unwrap()); @@ -528,7 +532,7 @@ impl<'db> TaskManager<'db> { ":chain_id": chain_id as u64, ":blockhash": blockhash.as_slice(), ":id_proofsys": proof_system as u8, - ":id_status": status as u8, + ":id_status": status as u32, ":submitter": submitter, ":block_number": block_number, ":parent_hash": parent_hash.as_slice(), diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 6dd90974e..782b4940a 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -7,53 +7,57 @@ #[cfg(test)] mod tests { - use std::path::Path; - use std::fs; - use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; use tempfile::tempdir; use raiko_primitives::B256; - use task_manager::{TaskDb, TaskManager, TaskManagerError, TaskProofsys, TaskStatus}; + use task_manager::{TaskDb, TaskProofsys}; #[test] fn test_enqueue_task() { - let dir = std::env::current_dir().unwrap().join("tests"); - let file = dir.as_path().join("test_enqueue_task.sqlite"); - if file.exists() { - fs::remove_file(&file).unwrap() - }; + // Materialized local DB + // let dir = std::env::current_dir().unwrap().join("tests"); + // let file = dir.as_path().join("test_enqueue_task.sqlite"); + // if file.exists() { + // fs::remove_file(&file).unwrap() + // }; + + // temp dir DB + let dir = tempdir().unwrap(); + let file = dir.path().join("test_enqueue_task.sqlite"); - let db = TaskDb::open_or_create(&file).unwrap(); - let mut tama = TaskDb::manage(&db).unwrap(); + #[allow(unused_mut)] + let mut db = TaskDb::open_or_create(&file).unwrap(); + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut tama = db.manage().unwrap(); let mut rng = ChaCha8Rng::seed_from_u64(123); let chain_id = 100; let blockhash = B256::random(); let proofsys = TaskProofsys::Risc0; - let payload_length = rng.gen_range(20..200); let submitter = "test_enqueue_task"; let block_number = rng.gen_range(1..4_000_000); let parent_hash = B256::random(); let state_root = B256::random(); let num_transactions = rng.gen_range(0..1000); let gas_used = rng.gen_range(0..100_000_000); + let payload_length = rng.gen_range(20..200); let payload: Vec = rng.gen_iter::().take(payload_length).collect(); tama.enqueue_task( chain_id, &blockhash, proofsys, - &payload, submitter, block_number, &parent_hash, &state_root, num_transactions, - gas_used + gas_used, + &payload, ).unwrap(); } } From bd06c138c44e3f72a31100b4c5ad589ef6ee8144 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Tue, 14 May 2024 07:11:04 +0200 Subject: [PATCH 04/44] task-manager: cleanup - ease copy pasting to SQL script, Registered status, persistent views, remove debug print --- task_manager/src/lib.rs | 225 ++++++++++++++++++------------------- task_manager/tests/main.rs | 6 +- 2 files changed, 110 insertions(+), 121 deletions(-) diff --git a/task_manager/src/lib.rs 
b/task_manager/src/lib.rs index 5d5d92e78..da6e0bd38 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -155,7 +155,7 @@ use std::path::Path; use raiko_primitives::{BlockNumber, ChainId, B256}; -use rusqlite::{named_params, params, Statement}; +use rusqlite::{named_params, Statement}; use rusqlite::{Connection, OpenFlags}; // Types @@ -201,7 +201,8 @@ pub enum TaskProofsys { #[rustfmt::skip] pub enum TaskStatus { Success = 0, - WorkInProgress = 1000, + Registered = 1000, + WorkInProgress = 2000, ProofFailure_Generic = -1000, ProofFailure_OutOfMemory = -1100, NetworkFailure = -2000, @@ -236,6 +237,7 @@ impl TaskDb { let conn = Self::open(path)?; Self::create_tables(&conn)?; + Self::create_views(&conn)?; Ok(conn) } @@ -251,55 +253,50 @@ impl TaskDb { Ok(Self { conn }) } - // Queries + // SQL // ---------------------------------------------------------------- fn create_tables(conn: &Connection) -> Result<(), TaskManagerError> { - conn.execute( - "CREATE TABLE metadata( + // Change the task_db_version if backward compatibility is broken + // and introduce a migration on DB opening ... if conserving history is important. + conn.execute_batch( + r#" + -- Metadata and mappings + ----------------------------------------------- + + CREATE TABLE metadata( key BLOB NOT NULL PRIMARY KEY, value BLOB - );", - params![], - )?; - conn.execute( - "INSERT INTO + ); + + INSERT INTO metadata(key, value) - VALUES - (?, ?);", - params!["task_db_version", 0u32], - )?; + VALUES + ('task_db_version', 0); - conn.execute( - "CREATE TABLE proofsys( + CREATE TABLE proofsys( id_proofsys INTEGER NOT NULL PRIMARY KEY, desc TEXT NOT NULL - );", - params![], - )?; - conn.execute( - "INSERT INTO + ); + + INSERT INTO proofsys(id_proofsys, desc) - VALUES + VALUES (0, 'Risc0'), (1, 'SP1'), - (2, 'SGX');", - params![], - )?; + (2, 'SGX'); - conn.execute( - "CREATE TABLE status_codes( + CREATE TABLE status_codes( id_status INTEGER NOT NULL PRIMARY KEY, desc TEXT NOT NULL - );", - params![], - )?; - conn.execute( - "INSERT INTO + ); + + INSERT INTO status_codes(id_status, desc) - VALUES + VALUES ( 0, 'Success'), - ( 1000, 'Work-in-progress'), + ( 1000, 'Registered'), + ( 2000, 'Work-in-progress'), (-1000, 'Proof failure (generic)'), (-1100, 'Proof failure (Out-Of-Memory)'), (-2000, 'Network failure'), @@ -308,27 +305,24 @@ impl TaskDb { (-3200, 'Cancelled (aborted)'), (-3210, 'Cancellation in progress'), (-4000, 'Invalid or unsupported block'), - (-9999, 'Unspecified failure reason');", - params![], - )?; + (-9999, 'Unspecified failure reason'); - conn.execute( - "CREATE TABLE proofs( + -- Data + ----------------------------------------------- + + CREATE TABLE proofs( id_proof INTEGER NOT NULL PRIMARY KEY, value BLOB NOT NULL - );", - params![], - )?; - - // Notes: - // 1. a blockhash may appear as many times as there are prover backends. - // 2. For query speed over (chain_id, blockhash, id_proofsys) - // there is no need to create an index as the UNIQUE constraint - // has an implied index, see: - // - https://sqlite.org/lang_createtable.html#uniqueconst - // - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices - conn.execute( - "CREATE TABLE taskqueue( + ); + + -- Notes: + -- 1. a blockhash may appear as many times as there are prover backends. + -- 2. 
For query speed over (chain_id, blockhash, id_proofsys) + -- there is no need to create an index as the UNIQUE constraint + -- has an implied index, see: + -- - https://sqlite.org/lang_createtable.html#uniqueconst + -- - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices + CREATE TABLE taskqueue( id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, chain_id INTEGER NOT NULL, blockhash BLOB NOT NULL, @@ -340,14 +334,12 @@ impl TaskDb { FOREIGN KEY(id_status) REFERENCES status_codes(id_status) FOREIGN KEY(id_proof) REFERENCES proofs(id_proof) UNIQUE (chain_id, blockhash, id_proofsys) - );", - params![], - )?; - // Different blockchains might have the same blockhash in case of a fork - // for example Ethereum and Ethereum Classic. - // As "GuestInput" refers to ChainID, the proving task would be different. - conn.execute( - "CREATE TABLE blocks( + ); + + -- Different blockchains might have the same blockhash in case of a fork + -- for example Ethereum and Ethereum Classic. + -- As "GuestInput" refers to ChainID, the proving task would be different. + CREATE TABLE blocks( chain_id INTEGER NOT NULL, blockhash BLOB NOT NULL, block_number INTEGER NOT NULL, @@ -356,38 +348,66 @@ impl TaskDb { num_transactions INTEGER NOT NULL, gas_used INTEGER NOT NULL, PRIMARY KEY (chain_id, blockhash) - );", - params![], - )?; - // Payloads will be very large, 1.77MB on L1 in Jan 2024 (Before EIP-4844 blobs), - // https://ethresear.ch/t/on-block-sizes-gas-limits-and-scalability/18444 - // mandating ideally a separated high-performance KV-store to reduce IO. - conn.execute( - "CREATE TABLE task_payloads( + ); + + -- Payloads will be very large, just the block would be 1.77MB on L1 in Jan 2024, + -- https://ethresear.ch/t/on-block-sizes-gas-limits-and-scalability/18444 + -- mandating ideally a separated high-performance KV-store to reduce IO. + -- This is without EIP-4844 blobs and the extra input for zkVMs. + CREATE TABLE task_payloads( id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, payload BLOB NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - );", - params![], - )?; - conn.execute( - "CREATE TABLE task_requests( + ); + + CREATE TABLE task_requests( id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, submitter TEXT NOT NULL, submit_date TEXT NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - );", - params![], - )?; - conn.execute( - "CREATE TABLE task_fulfillment( + ); + + CREATE TABLE task_fulfillment( id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, fulfiller TEXT NOT NULL, fulfill_date TEXT NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - );", - params![], - )?; + ); + "#)?; + + Ok(()) + } + + fn create_views(conn: &Connection) -> Result<(), TaskManagerError> { + // By convention, views will use an action verb as name. 
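+        // The enqueue_task view is deliberately insert-able: manage() attaches a TEMPORARY
+        // INSTEAD OF INSERT trigger to it, so a single INSERT INTO enqueue_task fans out to
+        // the blocks, taskqueue, task_payloads and task_requests tables (see the trigger
+        // definition in manage() below).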
+ conn.execute_batch( + r#" + CREATE VIEW enqueue_task AS + SELECT + tq.id_task, + tq.chain_id, + tq.blockhash, + tq.id_proofsys, + tq.id_status, + tr.submitter, + b.block_number, + b.parent_hash, + b.state_root, + b.num_transactions, + b.gas_used, + tp.payload + FROM + taskqueue tq + LEFT JOIN + blocks b on ( + b.chain_id = tq.chain_id + AND b.blockhash = tq.blockhash + ) + LEFT JOIN + task_payloads tp on tp.id_task = tq.id_task + LEFT JOIN + task_requests tr on tr.id_task = tq.id_task; + "#)?; Ok(()) } @@ -437,43 +457,17 @@ impl TaskDb { " -- PRAGMA temp_store = 'MEMORY'; - CREATE TEMPORARY VIEW temp.enqueue_task AS - SELECT - tq.id_task, - tq.chain_id, - tq.blockhash, - tq.id_proofsys, - tq.id_status, - tr.submitter, - b.block_number, - b.parent_hash, - b.state_root, - b.num_transactions, - b.gas_used, - tp.payload - FROM - taskqueue tq - LEFT JOIN - blocks b on ( - b.chain_id = tq.chain_id - AND b.blockhash = tq.blockhash - ) - LEFT JOIN - task_payloads tp on tp.id_task = tq.id_task - LEFT JOIN - task_requests tr on tr.id_task = tq.id_task; - CREATE TEMPORARY TABLE temp.current_task(id_task INTEGER); CREATE TEMPORARY TRIGGER enqueue_task_insert_trigger INSTEAD OF INSERT ON enqueue_task BEGIN - INSERT INTO blocks(chain_id, blockhash, block_number, parent_hash, state_root, num_transactions, gas_used) VALUES (new.chain_id, new.blockhash, new.block_number, new.parent_hash, new.state_root, new.num_transactions, new.gas_used); + -- Tasks are initialized at status 1000 - registered INSERT INTO taskqueue(chain_id, blockhash, id_proofsys, id_status) - VALUES (new.chain_id, new.blockhash, new.id_proofsys, new.id_status); + VALUES (new.chain_id, new.blockhash, new.id_proofsys, 1000); INSERT INTO current_task SELECT id_task FROM taskqueue @@ -496,11 +490,12 @@ impl TaskDb { let enqueue_task = conn.prepare( " - INSERT INTO temp.enqueue_task( - chain_id, blockhash, id_proofsys, id_status, submitter, + INSERT INTO enqueue_task( + chain_id, blockhash, id_proofsys, submitter, block_number, parent_hash, state_root, num_transactions, gas_used, payload) - VALUES (:chain_id, :blockhash, :id_proofsys, :id_status, :submitter, + VALUES ( + :chain_id, :blockhash, :id_proofsys, :submitter, :block_number, :parent_hash, :state_root, :num_transactions, :gas_used, :payload); ")?; @@ -523,16 +518,10 @@ impl<'db> TaskManager<'db> { gas_used: u64, payload: &[u8], ) -> Result<(), TaskManagerError> { - - println!("{}", self.enqueue_task.expanded_sql().unwrap()); - - let status = TaskStatus::WorkInProgress; - self.enqueue_task.execute(named_params! 
{ ":chain_id": chain_id as u64, ":blockhash": blockhash.as_slice(), ":id_proofsys": proof_system as u8, - ":id_status": status as u32, ":submitter": submitter, ":block_number": block_number, ":parent_hash": parent_hash.as_slice(), diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 782b4940a..6aab43090 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -9,7 +9,6 @@ mod tests { use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; - use tempfile::tempdir; use raiko_primitives::B256; use task_manager::{TaskDb, TaskProofsys}; @@ -17,14 +16,15 @@ mod tests { #[test] fn test_enqueue_task() { - // Materialized local DB + // // Materialized local DB // let dir = std::env::current_dir().unwrap().join("tests"); // let file = dir.as_path().join("test_enqueue_task.sqlite"); // if file.exists() { - // fs::remove_file(&file).unwrap() + // std::fs::remove_file(&file).unwrap() // }; // temp dir DB + use tempfile::tempdir; let dir = tempdir().unwrap(); let file = dir.path().join("test_enqueue_task.sqlite"); From bf63839d1ebe2a72056eef66d070368ce41cca12 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Tue, 14 May 2024 08:53:26 +0200 Subject: [PATCH 05/44] task-manager: add DB size query --- task_manager/src/lib.rs | 27 +++++++++++++++++- task_manager/tests/main.rs | 56 +++++++++++++++++++++++++++++++++++++- 2 files changed, 81 insertions(+), 2 deletions(-) diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index da6e0bd38..ad348fd02 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -189,6 +189,7 @@ pub struct TaskManager<'db> { enqueue_task: Statement<'db>, // dequeue_task: Statement<'db>, // get_block_proof_status: Statement<'db>, + get_db_size: Statement<'db>, } pub enum TaskProofsys { @@ -500,7 +501,23 @@ impl TaskDb { :payload); ")?; - Ok(TaskManager { enqueue_task }) + // The requires sqlite to be compiled with dbstat support: + // https://www.sqlite.org/dbstat.html + // which is the case for rusqlite + // https://github.com/rusqlite/rusqlite/blob/v0.31.0/libsqlite3-sys/build.rs#L126 + // but may not be the case for system-wide sqlite when debugging. 
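+        //
+        // Hedged fallback sketch if dbstat is compiled out (total size only, via the
+        // built-in pragma table-valued functions; not used below):
+        //
+        //     let total: i64 = conn.query_row(
+        //         "SELECT page_count * page_size FROM pragma_page_count(), pragma_page_size()",
+        //         [], |row| row.get(0))?;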
+ let get_db_size = conn.prepare( + " + SELECT + name as table_name, + SUM(pgsize) as table_size + FROM dbstat + GROUP BY table_name + ORDER BY SUM(pgsize) DESC + " + )?; + + Ok(TaskManager { enqueue_task, get_db_size }) } } @@ -532,6 +549,14 @@ impl<'db> TaskManager<'db> { })?; Ok(()) } + + /// Returns the total and detailed database size + pub fn get_db_size(&mut self) -> Result<(usize, Vec<(String, usize)>), TaskManagerError> { + let rows = self.get_db_size.query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; + let details = rows.collect::, _>>()?; + let total = details.iter().fold(0, |acc, item| acc + item.1); + Ok((total, details)) + } } #[cfg(test)] diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 6aab43090..d19bd7bb5 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -45,7 +45,7 @@ mod tests { let num_transactions = rng.gen_range(0..1000); let gas_used = rng.gen_range(0..100_000_000); let payload_length = rng.gen_range(20..200); - let payload: Vec = rng.gen_iter::().take(payload_length).collect(); + let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); tama.enqueue_task( chain_id, @@ -60,4 +60,58 @@ mod tests { &payload, ).unwrap(); } + + #[test] + fn test_get_db_size() { + + // Materialized local DB + let dir = std::env::current_dir().unwrap().join("tests"); + let file = dir.as_path().join("test_get_db_size.sqlite"); + if file.exists() { + std::fs::remove_file(&file).unwrap() + }; + + // // temp dir DB + // use tempfile::tempdir; + // let dir = tempdir().unwrap(); + // let file = dir.path().join("test_get_db_size.sqlite"); + + #[allow(unused_mut)] + let mut db = TaskDb::open_or_create(&file).unwrap(); + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut tama = db.manage().unwrap(); + + let mut rng = ChaCha8Rng::seed_from_u64(123); + + for _ in 0..42 { + let chain_id = 100; + let blockhash = B256::random(); + let proofsys = TaskProofsys::Risc0; + let submitter = "test_enqueue_task"; + let block_number = rng.gen_range(1..4_000_000); + let parent_hash = B256::random(); + let state_root = B256::random(); + let num_transactions = rng.gen_range(0..1000); + let gas_used = rng.gen_range(0..100_000_000); + let payload_length = rng.gen_range(1_000_000..10_000_000); + let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); + + tama.enqueue_task( + chain_id, + &blockhash, + proofsys, + submitter, + block_number, + &parent_hash, + &state_root, + num_transactions, + gas_used, + &payload, + ).unwrap(); + } + + let (db_size, db_tables_size) = tama.get_db_size().unwrap(); + println!("db_tables_size: {:?}", db_tables_size); + assert!(db_size / 1024 / 1024 > 40); + } } From b7ce7a116fbac48862c606c0b8c268c8efe26c66 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Tue, 14 May 2024 09:48:20 +0200 Subject: [PATCH 06/44] task-manager: id_proof is unneeded + prettify queries --- task_manager/src/lib.rs | 103 +++++++++++++++++++++---------------- task_manager/tests/main.rs | 2 +- 2 files changed, 61 insertions(+), 44 deletions(-) diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index ad348fd02..4d414e086 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -100,7 +100,7 @@ //! ____________________________ //! | Proof cache | A map: ID -> proof //! |___________________________| -//! | id_proof | proof_value | +//! | id_task | proof_value | //! |__________|________________| A Groth16 proof is 2G₁+1G₂ elements //! 
| 0 | 0xabcd...6789 | On BN254: 2*(2*32)+1*(2*2*32) = 256 bytes //! | 1 | 0x1234...cdef | @@ -113,16 +113,16 @@ //! Assuming 1kB of proofs per block (Stark-to-Groth16 Risc0 & SP1 + SGX, SGX size to be verified) //! That's only 216MB per month. //! -//! _____________________________________________________________________________________________ -//! | Tasks metadata | -//! |_____________________________________________________________________________________________| +//! ________________________________________________________________________________________________ +//! | Tasks metadata | +//! |________________________________________________________________________________________________| //! | id_task | chain_id | block_number | blockhash | parent_hash | state_root | # of txs | gas_used | -//! |_________|_________|______________|___________|____________|___________|__________|__________| -//! ___________________________________________________________ -//! | Task queue | -//! |__________________________________________________________| -//! | id_task | blockhash | id_proofsys | id_status | id_proof | -//! |_________|___________|_____________|___________|__________| +//! |_________|__________|______________|___________|_____________|____________|__________|__________| +//! ________________________________________________ +//! | Task queue | +//! |_______________________________________________| +//! | id_task | blockhash | id_proofsys | id_status | +//! |_________|___________|_____________|___________| //! ______________________________________ //! | Tasks inputs | //! |_____________________________________| @@ -266,7 +266,7 @@ impl TaskDb { ----------------------------------------------- CREATE TABLE metadata( - key BLOB NOT NULL PRIMARY KEY, + key BLOB UNIQUE NOT NULL PRIMARY KEY, value BLOB ); @@ -276,7 +276,7 @@ impl TaskDb { ('task_db_version', 0); CREATE TABLE proofsys( - id_proofsys INTEGER NOT NULL PRIMARY KEY, + id_proofsys INTEGER UNIQUE NOT NULL PRIMARY KEY, desc TEXT NOT NULL ); @@ -288,7 +288,7 @@ impl TaskDb { (2, 'SGX'); CREATE TABLE status_codes( - id_status INTEGER NOT NULL PRIMARY KEY, + id_status INTEGER UNIQUE NOT NULL PRIMARY KEY, desc TEXT NOT NULL ); @@ -311,9 +311,18 @@ impl TaskDb { -- Data ----------------------------------------------- - CREATE TABLE proofs( - id_proof INTEGER NOT NULL PRIMARY KEY, - value BLOB NOT NULL + -- Different blockchains might have the same blockhash in case of a fork + -- for example Ethereum and Ethereum Classic. + -- As "GuestInput" refers to ChainID, the proving task would be different. 
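+            -- Concrete example (for illustration only): Ethereum mainnet (chain_id 1) and
+            -- Ethereum Classic (chain_id 61) share every blockhash up to the DAO fork at
+            -- block 1920000; the composite (chain_id, blockhash) key below keeps them apart.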
+ CREATE TABLE blocks( + chain_id INTEGER NOT NULL, + blockhash BLOB NOT NULL, + block_number INTEGER NOT NULL, + parent_hash BLOB NOT NULL, + state_root BLOB NOT NULL, + num_transactions INTEGER NOT NULL, + gas_used INTEGER NOT NULL, + PRIMARY KEY (chain_id, blockhash) ); -- Notes: @@ -324,31 +333,22 @@ impl TaskDb { -- - https://sqlite.org/lang_createtable.html#uniqueconst -- - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices CREATE TABLE taskqueue( - id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, + id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, chain_id INTEGER NOT NULL, blockhash BLOB NOT NULL, id_proofsys INTEGER NOT NULL, id_status INTEGER NOT NULL, - id_proof INTEGER, FOREIGN KEY(chain_id, blockhash) REFERENCES blocks(chain_id, blockhash) FOREIGN KEY(id_proofsys) REFERENCES proofsys(id_proofsys) FOREIGN KEY(id_status) REFERENCES status_codes(id_status) - FOREIGN KEY(id_proof) REFERENCES proofs(id_proof) UNIQUE (chain_id, blockhash, id_proofsys) ); - -- Different blockchains might have the same blockhash in case of a fork - -- for example Ethereum and Ethereum Classic. - -- As "GuestInput" refers to ChainID, the proving task would be different. - CREATE TABLE blocks( - chain_id INTEGER NOT NULL, - blockhash BLOB NOT NULL, - block_number INTEGER NOT NULL, - parent_hash BLOB NOT NULL, - state_root BLOB NOT NULL, - num_transactions INTEGER NOT NULL, - gas_used INTEGER NOT NULL, - PRIMARY KEY (chain_id, blockhash) + CREATE TABLE task_requests( + id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, + submitter TEXT NOT NULL, + submit_date TEXT NOT NULL, + FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) ); -- Payloads will be very large, just the block would be 1.77MB on L1 in Jan 2024, @@ -356,24 +356,25 @@ impl TaskDb { -- mandating ideally a separated high-performance KV-store to reduce IO. -- This is without EIP-4844 blobs and the extra input for zkVMs. 
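            -- Rough sizing sketch, reusing the module-doc estimates (~1.77MB per payload,
            -- ~216000 slots per month): 216000 * 1.77MB ≈ 380GB per month, hence the
            -- dedicated table and the case for aggressive pruning.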
CREATE TABLE task_payloads( - id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, + id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, payload BLOB NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) ); - CREATE TABLE task_requests( - id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, - submitter TEXT NOT NULL, - submit_date TEXT NOT NULL, - FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - ); - CREATE TABLE task_fulfillment( - id_task INTEGER PRIMARY KEY UNIQUE NOT NULL, + id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, fulfiller TEXT NOT NULL, fulfill_date TEXT NOT NULL, FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) ); + + -- Proofs might also be large, so we isolate them in a dedicated table + CREATE TABLE task_proofs( + id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, + proof BLOB NOT NULL, + FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) + ); + "#)?; Ok(()) @@ -396,7 +397,7 @@ impl TaskDb { b.state_root, b.num_transactions, b.gas_used, - tp.payload + tpl.payload FROM taskqueue tq LEFT JOIN @@ -405,9 +406,25 @@ impl TaskDb { AND b.blockhash = tq.blockhash ) LEFT JOIN - task_payloads tp on tp.id_task = tq.id_task + task_requests tr on tr.id_task = tq.id_task + LEFT JOIN + task_payloads tpl on tpl.id_task = tq.id_task; + + CREATE VIEW update_task AS + SELECT + tq.id_task, + tq.chain_id, + tq.blockhash, + tq.id_proofsys, + tq.id_status, + tf.fulfiller, + tpf.proof + FROM + taskqueue tq + LEFT JOIN + task_fulfillment tf on tf.id_task = tq.id_task LEFT JOIN - task_requests tr on tr.id_task = tq.id_task; + task_proofs tpf on tpf.id_task = tq.id_task; "#)?; Ok(()) diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index d19bd7bb5..37ac80f00 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -87,7 +87,7 @@ mod tests { let chain_id = 100; let blockhash = B256::random(); let proofsys = TaskProofsys::Risc0; - let submitter = "test_enqueue_task"; + let submitter = "test_get_db_size"; let block_number = rng.gen_range(1..4_000_000); let parent_hash = B256::random(); let state_root = B256::random(); From e707bc79e3bad5b46564b379b55e6ed4bb15c343 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Tue, 14 May 2024 16:12:44 +0200 Subject: [PATCH 07/44] task-manager: change DB schema - allow multiple provers and status for same task in case of failures-retry --- .gitignore | 1 + task_manager/src/lib.rs | 224 +++++++++++++++++++++++++--------------- 2 files changed, 142 insertions(+), 83 deletions(-) diff --git a/.gitignore b/.gitignore index 2bff916d5..1053afbdb 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,7 @@ target/ *.sqlite *.sqlite-shm *.sqlite-wal +*.sqlite-journal # Temp files, swap, debug, log, perf, cache # ----------------------------------------------------------------------------------------- diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 4d414e086..fecb639cb 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -43,7 +43,6 @@ //! - KV-stores for (almost) immutable data //! - KV-store for large inputs and indistinguishable from random proofs //! - Tables for tasks and their metadata. -//! - Prefixed with rts_ in-case the DB is co-located with other services. //! //! __________________________ //! | metadata | @@ -69,7 +68,8 @@ //! | id_status | Desc | //! |_____________|__________________________________| //! | 0 | Success | -//! | 1000 | Work-in-progress | +//! | 1000 | Registered | +//! | 2000 | Work-in-progress | //! | | | //! | -1000 | Proof failure (prover - generic) | //! 
| -1100 | Proof failure (OOM) |
@@ -97,53 +97,58 @@
//! They should not exist in the DB and a proper analysis
//! and eventually status code should be assigned.
//!
-//! ____________________________
-//! | Proof cache | A map: ID -> proof
-//! |___________________________|
-//! | id_task | proof_value |
-//! |__________|________________| A Groth16 proof is 2G₁+1G₂ elements
-//! | 0 | 0xabcd...6789 | On BN254: 2*(2*32)+1*(2*2*32) = 256 bytes
-//! | 1 | 0x1234...cdef |
-//! | ... | ... | A SGX proof is ...
-//! |__________|________________| A Stark proof (not wrapped in Groth16) would be several kilobytes
-//!
-//! Do we need pruning?
-//! There are 60s * 60min * 24h * 30j = 2592000s in a month
-//! dividing by 12, that's 216000 Ethereum slots.
-//! Assuming 1kB of proofs per block (Stark-to-Groth16 Risc0 & SP1 + SGX, SGX size to be verified)
-//! That's only 216MB per month.
-//!
//! ________________________________________________________________________________________________
//! | Tasks metadata |
//! |________________________________________________________________________________________________|
//! | id_task | chain_id | block_number | blockhash | parent_hash | state_root | # of txs | gas_used |
//! |_________|__________|______________|___________|_____________|____________|__________|__________|
-//! ________________________________________________
-//! | Task queue |
-//! |_______________________________________________|
-//! | id_task | blockhash | id_proofsys | id_status |
-//! |_________|___________|_____________|___________|
+//! ____________________________________
+//! | Task queue |
+//! |___________________________________|
+//! | id_task | blockhash | id_proofsys |
+//! |_________|___________|_____________|
//! ______________________________________
-//! | Tasks inputs |
+//! | Task payloads |
//! |_____________________________________|
//! | id_task | inputs (serialized) |
//! |_________|___________________________|
//! _____________________________________
//! | Task requests |
//! |____________________________________|
-//! | id_task | id_submitter | submit_dt |
+//! | id_task | id_submitter | timestamp |
//! |_________|______________|___________|
-//! ______________________________________
-//! | Task fulfillment |
-//! |_____________________________________|
-//! | id_task | id_fulfiller | fulfill_dt |
-//! |_________|______________|____________|
+//! ___________________________________________________________________________________
+//! | Task progress trail |
+//! |__________________________________________________________________________________|
+//! | id_task | third_party | id_status | timestamp |
+//! |_________|________________________|_________________________|_____________________|
+//! | 101 | 'Based Proposer' | 1000 (Registered) | 2024-01-01 00:00:01 |
+//! | 101 | 'A Prover Network' | 2000 (WIP) | 2024-01-01 00:00:01 |
+//! | 101 | 'A Prover Network' | -2000 (Network failure) | 2024-01-01 00:02:00 |
+//! | 101 | 'Proof in the Pudding' | 2000 (WIP) | 2024-01-01 00:02:30 |
+//! | 101 | 'Proof in the Pudding' | 0 (Success) | 2024-01-01 01:02:30 |
//!
//! Rationale:
-//! - When dealing with proof requests we don't need to touch the fullfillment table
-//! - and inversely when dealing with provers, we don't need to deal with the request table.
-//! - inputs are very large and warrant a dedicated table, with pruning
//! - metadata is useful to audit block building and prover efficiency
+//! - Due to failures and retries, we may submit the same task to multiple fulfillers,
+//! or retry with the same fulfiller, so we keep an audit trail of events
+//! (the latest-status query is sketched after this hunk).
+//!
+//! ____________________________
+//! | Proof cache | A map: ID -> proof
+//! |___________________________|
+//! | id_task | proof_value |
+//! |__________|________________| A Groth16 proof is 2G₁+1G₂ elements
+//! | 0 | 0xabcd...6789 | On BN254: 2*(2*32)+1*(2*2*32) = 256 bytes
+//! | 1 | 0x1234...cdef |
+//! | ... | ... | A SGX proof is ...
+//! |__________|________________| A Stark proof (not wrapped in Groth16) would be several kilobytes
+//!
+//! Do we need pruning?
+//! There are 60s * 60min * 24h * 30d = 2592000s in a month;
+//! dividing by the 12s slot time, that's 216000 Ethereum slots.
+//! Assuming 1kB of proofs per block (Stark-to-Groth16 Risc0 & SP1 + SGX, SGX size to be verified)
+//! that's only 216MB per month.

// Imports
// ----------------------------------------------------------------
@@ -187,6 +192,9 @@ pub struct TaskDb {
#[derive(Debug)]
pub struct TaskManager<'db> {
enqueue_task: Statement<'db>,
+ update_task_progress: Statement<'db>,
+ // get_task_status: Statement<'db>,
+ // get_task_proof: Statement<'db>,
// dequeue_task: Statement<'db>,
// get_block_proof_status: Statement<'db>,
get_db_size: Statement<'db>,
@@ -332,25 +340,16 @@ impl TaskDb {
-- has an implied index, see:
-- - https://sqlite.org/lang_createtable.html#uniqueconst
-- - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices
- CREATE TABLE taskqueue(
+ CREATE TABLE tasks(
id_task INTEGER UNIQUE NOT NULL PRIMARY KEY,
chain_id INTEGER NOT NULL,
blockhash BLOB NOT NULL,
id_proofsys INTEGER NOT NULL,
- id_status INTEGER NOT NULL,
FOREIGN KEY(chain_id, blockhash) REFERENCES blocks(chain_id, blockhash)
FOREIGN KEY(id_proofsys) REFERENCES proofsys(id_proofsys)
- FOREIGN KEY(id_status) REFERENCES status_codes(id_status)
UNIQUE (chain_id, blockhash, id_proofsys)
);

- CREATE TABLE task_requests(
- id_task INTEGER UNIQUE NOT NULL PRIMARY KEY,
- submitter TEXT NOT NULL,
- submit_date TEXT NOT NULL,
- FOREIGN KEY(id_task) REFERENCES taskqueue(id_task)
- );
-
-- Payloads will be very large, just the block would be 1.77MB on L1 in Jan 2024,
-- https://ethresear.ch/t/on-block-sizes-gas-limits-and-scalability/18444
-- mandating ideally a separated high-performance KV-store to reduce IO.
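A minimal, self-contained sketch of the audit-trail rationale above (toy schema and made-up values, not the tables from this patch): every status change is appended, and the latest state per third party is recovered with MAX(timestamp):

    use rusqlite::Connection;

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;
        conn.execute_batch(
            "CREATE TABLE task_status(
                 id_task INTEGER NOT NULL,
                 thirdparty TEXT,
                 id_status INTEGER NOT NULL,
                 timestamp TEXT NOT NULL
             );
             INSERT INTO task_status VALUES
                 (101, 'A Prover Network',      2000, '2024-01-01 00:00:01'),
                 (101, 'A Prover Network',     -2000, '2024-01-01 00:02:00'),
                 (101, 'Proof in the Pudding',  2000, '2024-01-01 00:02:30'),
                 (101, 'Proof in the Pudding',     0, '2024-01-01 01:02:30');",
        )?;
        // One row per third party, each carrying the status of its latest
        // event (SQLite takes bare columns from the MAX(timestamp) row).
        let mut stmt = conn.prepare(
            "SELECT thirdparty, id_status, MAX(timestamp) FROM task_status
             WHERE id_task = 101 GROUP BY thirdparty ORDER BY 3 DESC;",
        )?;
        let trail: Vec<(Option<String>, i32, String)> = stmt
            .query_map([], |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)))?
            .collect::<Result<_, _>>()?;
        assert_eq!(trail[0].1, 0); // 'Proof in the Pudding' succeeded last
        Ok(())
    }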
@@ -358,21 +357,29 @@ impl TaskDb { CREATE TABLE task_payloads( id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, payload BLOB NOT NULL, - FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) - ); - - CREATE TABLE task_fulfillment( - id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, - fulfiller TEXT NOT NULL, - fulfill_date TEXT NOT NULL, - FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) + FOREIGN KEY(id_task) REFERENCES tasks(id_task) ); -- Proofs might also be large, so we isolate them in a dedicated table CREATE TABLE task_proofs( id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, proof BLOB NOT NULL, - FOREIGN KEY(id_task) REFERENCES taskqueue(id_task) + FOREIGN KEY(id_task) REFERENCES tasks(id_task) + ); + + CREATE TABLE third_parties( + id_thirdparty INTEGER UNIQUE NOT NULL PRIMARY KEY, + thirdparty_desc TEXT UNIQUE NOT NULL + ); + + CREATE TABLE task_status( + id_task INTEGER NOT NULL, + id_thirdparty INTEGER NOT NULL, + id_status INTEGER NOT NULL, + timestamp TEXT NOT NULL, + FOREIGN KEY(id_task) REFERENCES tasks(id_task) + FOREIGN KEY(id_thirdparty) REFERENCES third_parties(id_thirdparty) + FOREIGN KEY(id_status) REFERENCES status_codes(id_status) ); "#)?; @@ -386,12 +393,13 @@ impl TaskDb { r#" CREATE VIEW enqueue_task AS SELECT - tq.id_task, - tq.chain_id, - tq.blockhash, - tq.id_proofsys, - tq.id_status, - tr.submitter, + t.id_task, + t.chain_id, + t.blockhash, + t.id_proofsys, + ts.id_status, + ts.id_thirdparty AS submitter, + t3p.thirdparty_desc, b.block_number, b.parent_hash, b.state_root, @@ -399,32 +407,34 @@ impl TaskDb { b.gas_used, tpl.payload FROM - taskqueue tq + tasks t LEFT JOIN blocks b on ( - b.chain_id = tq.chain_id - AND b.blockhash = tq.blockhash + b.chain_id = t.chain_id + AND b.blockhash = t.blockhash ) LEFT JOIN - task_requests tr on tr.id_task = tq.id_task + task_status ts on ts.id_task = t.id_task LEFT JOIN - task_payloads tpl on tpl.id_task = tq.id_task; + task_payloads tpl on tpl.id_task = t.id_task + LEFT JOIN + third_parties t3p on t3p.id_thirdparty = ts.id_thirdparty; - CREATE VIEW update_task AS + CREATE VIEW update_task_progress AS SELECT - tq.id_task, - tq.chain_id, - tq.blockhash, - tq.id_proofsys, - tq.id_status, - tf.fulfiller, + t.id_task, + t.chain_id, + t.blockhash, + t.id_proofsys, + ts.id_status, + ts.id_thirdparty AS fulfiller, tpf.proof FROM - taskqueue tq + tasks t LEFT JOIN - task_fulfillment tf on tf.id_task = tq.id_task + task_status ts on ts.id_task = t.id_task LEFT JOIN - task_proofs tpf on tpf.id_task = tq.id_task; + task_proofs tpf on tpf.id_task = t.id_task; "#)?; Ok(()) @@ -440,7 +450,7 @@ impl TaskDb { pub fn manage<'db>(&'db self) -> Result, TaskManagerError> { // To update all the tables with the task_id assigned by Sqlite - // we require row IDs for the taskqueue table + // we require row IDs for the tasks table // and we use last_insert_rowid() which is not reentrant and need a transaction lock // and store them in a temporary table, configured to be in-memory. 
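// A toy sketch of the INSTEAD OF mechanism relied on here (assumed tables
// `a` and `b`, not the real schema): inserting into a view does nothing by
// itself, but an INSTEAD OF trigger can fan the insert out to real tables:
//
//     CREATE TABLE a(x INTEGER);
//     CREATE TABLE b(y INTEGER);
//     CREATE VIEW ab AS SELECT a.x, b.y FROM a, b;
//     CREATE TRIGGER ab_insert INSTEAD OF INSERT ON ab
//     BEGIN
//         INSERT INTO a(x) VALUES (new.x);
//         INSERT INTO b(y) VALUES (new.y);
//     END;
//     INSERT INTO ab(x, y) VALUES (1, 2); -- writes one row into each table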
// @@ -472,7 +482,7 @@ impl TaskDb { let conn = &self.conn; conn.execute_batch( - " + r#" -- PRAGMA temp_store = 'MEMORY'; CREATE TEMPORARY TABLE temp.current_task(id_task INTEGER); @@ -483,12 +493,11 @@ impl TaskDb { INSERT INTO blocks(chain_id, blockhash, block_number, parent_hash, state_root, num_transactions, gas_used) VALUES (new.chain_id, new.blockhash, new.block_number, new.parent_hash, new.state_root, new.num_transactions, new.gas_used); - -- Tasks are initialized at status 1000 - registered - INSERT INTO taskqueue(chain_id, blockhash, id_proofsys, id_status) - VALUES (new.chain_id, new.blockhash, new.id_proofsys, 1000); + INSERT INTO tasks(chain_id, blockhash, id_proofsys) + VALUES (new.chain_id, new.blockhash, new.id_proofsys); INSERT INTO current_task - SELECT id_task FROM taskqueue + SELECT id_task FROM tasks WHERE rowid = last_insert_rowid() LIMIT 1; @@ -497,14 +506,53 @@ impl TaskDb { FROM current_task tmp LIMIT 1; - INSERT INTO task_requests(id_task, submitter, submit_date) - SELECT tmp.id_task, new.submitter, datetime('now') + INSERT OR IGNORE INTO third_parties(thirdparty_desc) + VALUES (new.submitter); + + -- Tasks are initialized at status 1000 - registered + INSERT INTO task_status(id_task, id_thirdparty, id_status, timestamp) + SELECT tmp.id_task, t3p.id_thirdparty, 1000, datetime('now') FROM current_task tmp + JOIN third_parties t3p + WHERE t3p.thirdparty_desc = new.submitter LIMIT 1; DELETE FROM current_task; END; - ")?; + + CREATE TEMPORARY TRIGGER update_task_progress_trigger + INSTEAD OF INSERT ON update_task_progress + BEGIN + INSERT INTO current_task + SELECT id_task + FROM tasks + WHERE id_task = new.id_task + LIMIT 1; + + -- If fulfiller is NULL, due to IGNORE and the NOT NULL requirement, + -- table will be left as-is. 
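-- (Illustrative behaviour of OR IGNORE with the UNIQUE thirdparty_desc
-- column, made-up value: running
--     INSERT OR IGNORE INTO third_parties(thirdparty_desc) VALUES ('prover-A');
-- twice creates the row once and makes the second run a silent no-op,
-- so each third party is registered exactly once.)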
+ INSERT OR IGNORE INTO third_parties(thirdparty_desc) + VALUES (new.fulfiller); + + INSERT INTO task_status + SELECT id_task, id_thirdparty, new.id_status, datetime('now') + FROM current_task + JOIN third_parties t3p + WHERE 1=1 + AND new.fulfiller IS NOT NULL + AND new.id_status IS NOT NULL + AND new.fulfiller = t3p.thirdparty_desc + LIMIT 1; + + INSERT OR REPLACE INTO task_proofs + SELECT id_task, new.proof + FROM current_task + WHERE new.proof IS NOT NULL + LIMIT 1; + + DELETE FROM current_task; + END; + "#)?; let enqueue_task = conn.prepare( " @@ -518,6 +566,16 @@ impl TaskDb { :payload); ")?; + let update_task_progress = conn.prepare( + " + INSERT INTO update_task_progress( + chain_id, blockhash, id_proofsys, + fulfiller, id_status, proof) + VALUES ( + :chain_id, :blockhash, :id_proofsys, + :fulfiller, :id_status, :proof); + ")?; + // The requires sqlite to be compiled with dbstat support: // https://www.sqlite.org/dbstat.html // which is the case for rusqlite @@ -534,7 +592,7 @@ impl TaskDb { " )?; - Ok(TaskManager { enqueue_task, get_db_size }) + Ok(TaskManager { enqueue_task, update_task_progress, get_db_size }) } } From 0402e6aa88d2a680dbd349a37de9d1257b32adb5 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Tue, 14 May 2024 17:16:32 +0200 Subject: [PATCH 08/44] task-manager: allow task updates --- task_manager/src/lib.rs | 29 ++++++++- task_manager/tests/main.rs | 123 ++++++++++++++++++++++++++++++++++++- 2 files changed, 146 insertions(+), 6 deletions(-) diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index fecb639cb..98a603350 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -200,6 +200,7 @@ pub struct TaskManager<'db> { get_db_size: Statement<'db>, } +#[derive(Debug, Copy, Clone)] pub enum TaskProofsys { Risc0 = 0, SP1 = 1, @@ -208,6 +209,7 @@ pub enum TaskProofsys { #[allow(non_camel_case_types)] #[rustfmt::skip] +#[derive(Debug, Copy, Clone)] pub enum TaskStatus { Success = 0, Registered = 1000, @@ -537,11 +539,12 @@ impl TaskDb { INSERT INTO task_status SELECT id_task, id_thirdparty, new.id_status, datetime('now') FROM current_task - JOIN third_parties t3p + LEFT JOIN + -- fulfiller can be NULL, for example + -- for tasks Cancelled before they were ever sent to a prover. + third_parties t3p ON new.fulfiller = t3p.thirdparty_desc WHERE 1=1 - AND new.fulfiller IS NOT NULL AND new.id_status IS NOT NULL - AND new.fulfiller = t3p.thirdparty_desc LIMIT 1; INSERT OR REPLACE INTO task_proofs @@ -625,6 +628,26 @@ impl<'db> TaskManager<'db> { Ok(()) } + pub fn update_task_progress( + &mut self, + chain_id: ChainId, + blockhash: &B256, + proof_system: TaskProofsys, + fulfiller: Option<&str>, + status: TaskStatus, + proof: Option<&[u8]>, + ) -> Result<(), TaskManagerError> { + self.update_task_progress.execute(named_params! 
{ + ":chain_id": chain_id as u64, + ":blockhash": blockhash.as_slice(), + ":id_proofsys": proof_system as u8, + ":fulfiller": fulfiller, + ":id_status": status as isize, + ":proof": proof + })?; + Ok(()) + } + /// Returns the total and detailed database size pub fn get_db_size(&mut self) -> Result<(usize, Vec<(String, usize)>), TaskManagerError> { let rows = self.get_db_size.query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 37ac80f00..86fc93f6c 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -11,7 +11,7 @@ mod tests { use rand_chacha::ChaCha8Rng; use raiko_primitives::B256; - use task_manager::{TaskDb, TaskProofsys}; + use task_manager::{TaskDb, TaskProofsys, TaskStatus}; #[test] fn test_enqueue_task() { @@ -87,7 +87,7 @@ mod tests { let chain_id = 100; let blockhash = B256::random(); let proofsys = TaskProofsys::Risc0; - let submitter = "test_get_db_size"; + let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10)); let block_number = rng.gen_range(1..4_000_000); let parent_hash = B256::random(); let state_root = B256::random(); @@ -100,7 +100,7 @@ mod tests { chain_id, &blockhash, proofsys, - submitter, + &submitter, block_number, &parent_hash, &state_root, @@ -114,4 +114,121 @@ mod tests { println!("db_tables_size: {:?}", db_tables_size); assert!(db_size / 1024 / 1024 > 40); } + + #[test] + fn test_update_task_progress() { + + // Materialized local DB + let dir = std::env::current_dir().unwrap().join("tests"); + let file = dir.as_path().join("test_update_task_progress.sqlite"); + if file.exists() { + std::fs::remove_file(&file).unwrap() + }; + + // // temp dir DB + // use tempfile::tempdir; + // let dir = tempdir().unwrap(); + // let file = dir.path().join("test_update_task_progress.sqlite"); + + #[allow(unused_mut)] + let mut db = TaskDb::open_or_create(&file).unwrap(); + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut tama = db.manage().unwrap(); + + let mut rng = ChaCha8Rng::seed_from_u64(123); + let mut tasks = vec![]; + + for _ in 0..5 { + let chain_id = 100; + let blockhash = B256::random(); + let proofsys = TaskProofsys::Risc0; + let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10)); + let block_number = rng.gen_range(1..4_000_000); + let parent_hash = B256::random(); + let state_root = B256::random(); + let num_transactions = rng.gen_range(0..1000); + let gas_used = rng.gen_range(0..100_000_000); + let payload_length = rng.gen_range(1_000_000..10_000_000); + let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); + + tama.enqueue_task( + chain_id, + &blockhash, + proofsys, + &submitter, + block_number, + &parent_hash, + &state_root, + num_transactions, + gas_used, + &payload, + ).unwrap(); + + tasks.push(( + chain_id, + blockhash, + proofsys, + )); + } + + tama.update_task_progress( + tasks[0].0, + &tasks[0].1, + tasks[0].2, + None, + TaskStatus::Cancelled_NeverStarted, + None + ).unwrap(); + + // ----------------------- + + tama.update_task_progress( + tasks[1].0, + &tasks[1].1, + tasks[1].2, + Some("A prover Network"), + TaskStatus::WorkInProgress, + None + ).unwrap(); + + tama.update_task_progress( + tasks[1].0, + &tasks[1].1, + tasks[1].2, + Some("A prover Network"), + TaskStatus::CancellationInProgress, + None + ).unwrap(); + + tama.update_task_progress( + tasks[1].0, + &tasks[1].1, + tasks[1].2, + Some("A prover Network"), + TaskStatus::Cancelled, + None + ).unwrap(); + + // 
----------------------- + + tama.update_task_progress( + tasks[2].0, + &tasks[2].1, + tasks[2].2, + Some("A based prover"), + TaskStatus::WorkInProgress, + None + ).unwrap(); + + let proof: Vec<_> = (&mut rng).gen_iter::().take(128).collect(); + tama.update_task_progress( + tasks[2].0, + &tasks[2].1, + tasks[2].2, + Some("A based prover"), + TaskStatus::WorkInProgress, + Some(&proof) + ).unwrap(); + } + } From a1302eced6a76be21a60e53cef2b22116e0ed256 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Wed, 15 May 2024 12:29:06 +0200 Subject: [PATCH 09/44] task-manager: retrieve cached proofs from DB --- task_manager/src/lib.rs | 126 ++++++++++++++++++++++++++++++------- task_manager/tests/main.rs | 14 +++-- 2 files changed, 115 insertions(+), 25 deletions(-) diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 98a603350..729492094 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -193,10 +193,9 @@ pub struct TaskDb { pub struct TaskManager<'db> { enqueue_task: Statement<'db>, update_task_progress: Statement<'db>, - // get_task_status: Statement<'db>, - // get_task_proof: Statement<'db>, - // dequeue_task: Statement<'db>, - // get_block_proof_status: Statement<'db>, + get_task_proof: Statement<'db>, + get_task_proving_status: Statement<'db>, + get_tasks_unfinished: Statement<'db>, get_db_size: Statement<'db>, } @@ -369,18 +368,18 @@ impl TaskDb { FOREIGN KEY(id_task) REFERENCES tasks(id_task) ); - CREATE TABLE third_parties( + CREATE TABLE thirdparties( id_thirdparty INTEGER UNIQUE NOT NULL PRIMARY KEY, thirdparty_desc TEXT UNIQUE NOT NULL ); CREATE TABLE task_status( id_task INTEGER NOT NULL, - id_thirdparty INTEGER NOT NULL, + id_thirdparty INTEGER, id_status INTEGER NOT NULL, timestamp TEXT NOT NULL, FOREIGN KEY(id_task) REFERENCES tasks(id_task) - FOREIGN KEY(id_thirdparty) REFERENCES third_parties(id_thirdparty) + FOREIGN KEY(id_thirdparty) REFERENCES thirdparties(id_thirdparty) FOREIGN KEY(id_status) REFERENCES status_codes(id_status) ); @@ -420,7 +419,7 @@ impl TaskDb { LEFT JOIN task_payloads tpl on tpl.id_task = t.id_task LEFT JOIN - third_parties t3p on t3p.id_thirdparty = ts.id_thirdparty; + thirdparties t3p on t3p.id_thirdparty = ts.id_thirdparty; CREATE VIEW update_task_progress AS SELECT @@ -508,14 +507,14 @@ impl TaskDb { FROM current_task tmp LIMIT 1; - INSERT OR IGNORE INTO third_parties(thirdparty_desc) + INSERT OR IGNORE INTO thirdparties(thirdparty_desc) VALUES (new.submitter); -- Tasks are initialized at status 1000 - registered INSERT INTO task_status(id_task, id_thirdparty, id_status, timestamp) SELECT tmp.id_task, t3p.id_thirdparty, 1000, datetime('now') FROM current_task tmp - JOIN third_parties t3p + JOIN thirdparties t3p WHERE t3p.thirdparty_desc = new.submitter LIMIT 1; @@ -528,23 +527,24 @@ impl TaskDb { INSERT INTO current_task SELECT id_task FROM tasks - WHERE id_task = new.id_task + WHERE 1=1 + AND chain_id = new.chain_id + AND blockhash = new.blockhash + AND id_proofsys = new.id_proofsys LIMIT 1; -- If fulfiller is NULL, due to IGNORE and the NOT NULL requirement, -- table will be left as-is. 
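-- (Illustrative trace with a made-up task: reporting a status with
-- fulfiller = NULL still records a task_status row, because the LEFT JOIN
-- below simply finds no thirdparties match and leaves id_thirdparty NULL,
-- which the relaxed task_status schema above now permits.)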
- INSERT OR IGNORE INTO third_parties(thirdparty_desc) + INSERT OR IGNORE INTO thirdparties(thirdparty_desc) VALUES (new.fulfiller); - INSERT INTO task_status - SELECT id_task, id_thirdparty, new.id_status, datetime('now') - FROM current_task - LEFT JOIN + INSERT INTO task_status(id_task, id_thirdparty, id_status, timestamp) + SELECT tmp.id_task, t3p.id_thirdparty, new.id_status, datetime('now') + FROM current_task tmp + LEFT JOIN thirdparties t3p -- fulfiller can be NULL, for example -- for tasks Cancelled before they were ever sent to a prover. - third_parties t3p ON new.fulfiller = t3p.thirdparty_desc - WHERE 1=1 - AND new.id_status IS NOT NULL + ON t3p.thirdparty_desc = new.fulfiller LIMIT 1; INSERT OR REPLACE INTO task_proofs @@ -591,11 +591,79 @@ impl TaskDb { SUM(pgsize) as table_size FROM dbstat GROUP BY table_name - ORDER BY SUM(pgsize) DESC + ORDER BY SUM(pgsize) DESC; " )?; - Ok(TaskManager { enqueue_task, update_task_progress, get_db_size }) + let get_task_proof = conn.prepare( + " + SELECT proof + FROM task_proofs tp + LEFT JOIN + tasks t ON tp.id_task = t.id_task + WHERE 1=1 + AND t.chain_id = :chain_id + AND t.blockhash = :blockhash + AND t.id_proofsys = :id_proofsys + LIMIT 1; + ")?; + + let get_task_proving_status = conn.prepare( + " + SELECT + thirdparty_desc, + id_status, + MAX(timestamp) + FROM + task_status ts + LEFT JOIN + tasks t ON ts.id_task = t.id_task + LEFT JOIN + thirdparties t3p ON ts.id_thirdparty = t3p.id_thirdparty + WHERE 1=1 + AND t.chain_id = :chain_id + AND t.blockhash = :blockhash + AND t.id_proofsys = :id_proofsys + GROUP BY + ts.id_thirdparty + ORDER BY + ts.timestamp DESC; + ")?; + + let get_tasks_unfinished = conn.prepare ( + " + SELECT + t.chain_id, + t.blockhash, + t.id_proofsys, + t3p.thirdparty_desc, + ts.id_status, + MAX(timestamp) + FROM + task_status ts + LEFT JOIN + tasks t ON ts.id_task = t.id_task + LEFT JOIN + thirdparties t3p ON ts.id_thirdparty = t3p.id_thirdparty + WHERE 1=1 + AND id_status NOT IN ( + 0, -- Success + -3000, -- Cancelled + -3100, -- Cancelled (never started) + -3200 -- Cancelled (aborted) + -- What do we do with -4000 Invalid/unsupported blocks? + -- And -9999 Unspecified failure reason? + -- For now we return them until we know more of the failure modes + ); + ")?; + + Ok(TaskManager { + enqueue_task, + update_task_progress, + get_task_proof, + get_task_proving_status, + get_tasks_unfinished, + get_db_size }) } } @@ -648,6 +716,21 @@ impl<'db> TaskManager<'db> { Ok(()) } + pub fn get_task_proof( + &mut self, + chain_id: ChainId, + blockhash: &B256, + proof_system: TaskProofsys, + ) -> Result, TaskManagerError> { + let proof = self.get_task_proof.query_row(named_params! 
{ + ":chain_id": chain_id as u64, + ":blockhash": blockhash.as_slice(), + ":id_proofsys": proof_system as u8, + }, |r| r.get(0))?; + + Ok(proof) + } + /// Returns the total and detailed database size pub fn get_db_size(&mut self) -> Result<(usize, Vec<(String, usize)>), TaskManagerError> { let rows = self.get_db_size.query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; @@ -655,6 +738,7 @@ impl<'db> TaskManager<'db> { let total = details.iter().fold(0, |acc, item| acc + item.1); Ok((total, details)) } + } #[cfg(test)] diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 86fc93f6c..8b9df5826 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -116,11 +116,11 @@ mod tests { } #[test] - fn test_update_task_progress() { + fn test_update_query_tasks_progress() { // Materialized local DB let dir = std::env::current_dir().unwrap().join("tests"); - let file = dir.as_path().join("test_update_task_progress.sqlite"); + let file = dir.as_path().join("test_update_query_tasks_progress.sqlite"); if file.exists() { std::fs::remove_file(&file).unwrap() }; @@ -148,7 +148,7 @@ mod tests { let state_root = B256::random(); let num_transactions = rng.gen_range(0..1000); let gas_used = rng.gen_range(0..100_000_000); - let payload_length = rng.gen_range(1_000_000..10_000_000); + let payload_length = rng.gen_range(16..64); let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); tama.enqueue_task( @@ -226,9 +226,15 @@ mod tests { &tasks[2].1, tasks[2].2, Some("A based prover"), - TaskStatus::WorkInProgress, + TaskStatus::Success, Some(&proof) ).unwrap(); + + // ----------------------- + assert_eq!( + proof, + tama.get_task_proof(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() + ) } } From 881f174271a36fe3b412e19fbf6cd4f2d7599e59 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Wed, 15 May 2024 13:53:31 +0200 Subject: [PATCH 10/44] task-manager: add status check --- Cargo.toml | 1 + task_manager/Cargo.toml | 1 + task_manager/src/lib.rs | 30 ++++++++++++++++++++++++----- task_manager/tests/main.rs | 39 ++++++++++++++++++++++++++++++++++++-- 4 files changed, 64 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6a999196f..5bc4c8660 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,6 +103,7 @@ base64-serde = "0.7.0" base64 = "0.21.7" libflate = { version = "2.0.0" } typetag = { version = "0.2.15" } +num_enum = "0.7.2" # tracing, logging tracing = "0.1" diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index 4227b1b7f..2517e1783 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" # { workspace = true } [dependencies] raiko-primitives = { workspace = true } rusqlite = { workspace = true } +num_enum = { workspace = true } [dev-dependencies] rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::() diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 729492094..33746c686 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -163,6 +163,8 @@ use raiko_primitives::{BlockNumber, ChainId, B256}; use rusqlite::{named_params, Statement}; use rusqlite::{Connection, OpenFlags}; +use num_enum::{IntoPrimitive, FromPrimitive}; + // Types // ---------------------------------------------------------------- @@ -208,7 +210,8 @@ pub enum TaskProofsys { #[allow(non_camel_case_types)] #[rustfmt::skip] -#[derive(Debug, Copy, Clone)] +#[derive(PartialEq, Debug, Copy, Clone, IntoPrimitive, FromPrimitive)] +#[repr(i32)] pub enum TaskStatus { 
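// With num_enum's IntoPrimitive/FromPrimitive the enum converts to and
// from i32 without unsafe code; unknown codes fall back to the
// #[num_enum(default)] variant. A sketch of the intended round-trip:
//     assert_eq!(i32::from(TaskStatus::Success), 0);
//     assert_eq!(TaskStatus::from(-3100), TaskStatus::Cancelled_NeverStarted);
//     assert_eq!(TaskStatus::from(12345), TaskStatus::SqlDbCorruption);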
Success = 0, Registered = 1000, @@ -222,6 +225,8 @@ pub enum TaskStatus { CancellationInProgress = -3210, InvalidOrUnsupportedBlock = -4000, UnspecifiedFailureReason = -9999, + #[num_enum(default)] + SqlDbCorruption = -99999, } // Implementation @@ -611,8 +616,8 @@ impl TaskDb { let get_task_proving_status = conn.prepare( " SELECT - thirdparty_desc, - id_status, + t3p.thirdparty_desc, + ts.id_status, MAX(timestamp) FROM task_status ts @@ -625,7 +630,7 @@ impl TaskDb { AND t.blockhash = :blockhash AND t.id_proofsys = :id_proofsys GROUP BY - ts.id_thirdparty + t3p.id_thirdparty ORDER BY ts.timestamp DESC; ")?; @@ -710,12 +715,27 @@ impl<'db> TaskManager<'db> { ":blockhash": blockhash.as_slice(), ":id_proofsys": proof_system as u8, ":fulfiller": fulfiller, - ":id_status": status as isize, + ":id_status": status as i32, ":proof": proof })?; Ok(()) } + pub fn get_task_proving_status( + &mut self, + chain_id: ChainId, + blockhash: &B256, + proof_system: TaskProofsys, + ) -> Result { + let proving_status = self.get_task_proving_status.query_row(named_params! { + ":chain_id": chain_id as u64, + ":blockhash": blockhash.as_slice(), + ":id_proofsys": proof_system as u8, + }, |r| r.get::<_, i32>(0).map(TaskStatus::from))?; + + Ok(proving_status) + } + pub fn get_task_proof( &mut self, chain_id: ChainId, diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 8b9df5826..2da68b46a 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -164,6 +164,11 @@ mod tests { &payload, ).unwrap(); + assert_eq!( + TaskStatus::Registered, + tama.get_task_proving_status(chain_id, &blockhash, proofsys).unwrap() + ); + tasks.push(( chain_id, blockhash, @@ -180,6 +185,11 @@ mod tests { None ).unwrap(); + assert_eq!( + TaskStatus::Cancelled_NeverStarted, + tama.get_task_proving_status(tasks[0].0, &tasks[0].1, tasks[0].2).unwrap() + ); + // ----------------------- tama.update_task_progress( @@ -191,6 +201,11 @@ mod tests { None ).unwrap(); + assert_eq!( + TaskStatus::WorkInProgress, + tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap() + ); + tama.update_task_progress( tasks[1].0, &tasks[1].1, @@ -200,6 +215,11 @@ mod tests { None ).unwrap(); + assert_eq!( + TaskStatus::CancellationInProgress, + tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap() + ); + tama.update_task_progress( tasks[1].0, &tasks[1].1, @@ -209,6 +229,11 @@ mod tests { None ).unwrap(); + assert_eq!( + TaskStatus::Cancelled, + tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap() + ); + // ----------------------- tama.update_task_progress( @@ -220,6 +245,11 @@ mod tests { None ).unwrap(); + assert_eq!( + TaskStatus::WorkInProgress, + tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() + ); + let proof: Vec<_> = (&mut rng).gen_iter::().take(128).collect(); tama.update_task_progress( tasks[2].0, @@ -230,11 +260,16 @@ mod tests { Some(&proof) ).unwrap(); - // ----------------------- + assert_eq!( + TaskStatus::Success, + tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() + ); + assert_eq!( proof, tama.get_task_proof(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() - ) + ); + } } From f0362da2109a6236ea231949edf117bc4787b96f Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Wed, 15 May 2024 19:02:58 +0200 Subject: [PATCH 11/44] task-manager: add progress reports --- task_manager/Cargo.toml | 3 +- task_manager/src/lib.rs | 27 ++-- task_manager/tests/main.rs | 297 ++++++++++++++++++++++++++----------- 3 files 
changed, 232 insertions(+), 95 deletions(-) diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index 2517e1783..e3052a5e9 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -6,8 +6,9 @@ edition = "2021" # { workspace = true } [dependencies] raiko-primitives = { workspace = true } -rusqlite = { workspace = true } +rusqlite = { workspace = true, features = ["chrono"] } num_enum = { workspace = true } +chrono = { workspace = true } [dev-dependencies] rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::() diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 33746c686..ccd511621 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -160,9 +160,10 @@ use std::path::Path; use raiko_primitives::{BlockNumber, ChainId, B256}; -use rusqlite::{named_params, Statement}; +use rusqlite::{named_params, Statement, MappedRows}; use rusqlite::{Connection, OpenFlags}; +use chrono::{DateTime, Utc}; use num_enum::{IntoPrimitive, FromPrimitive}; // Types @@ -382,7 +383,7 @@ impl TaskDb { id_task INTEGER NOT NULL, id_thirdparty INTEGER, id_status INTEGER NOT NULL, - timestamp TEXT NOT NULL, + timestamp TIMESTAMP DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) NOT NULL, FOREIGN KEY(id_task) REFERENCES tasks(id_task) FOREIGN KEY(id_thirdparty) REFERENCES thirdparties(id_thirdparty) FOREIGN KEY(id_status) REFERENCES status_codes(id_status) @@ -516,8 +517,9 @@ impl TaskDb { VALUES (new.submitter); -- Tasks are initialized at status 1000 - registered - INSERT INTO task_status(id_task, id_thirdparty, id_status, timestamp) - SELECT tmp.id_task, t3p.id_thirdparty, 1000, datetime('now') + -- timestamp is auto-filled with datetime('now'), see its field definition + INSERT INTO task_status(id_task, id_thirdparty, id_status) + SELECT tmp.id_task, t3p.id_thirdparty, 1000 FROM current_task tmp JOIN thirdparties t3p WHERE t3p.thirdparty_desc = new.submitter @@ -543,8 +545,9 @@ impl TaskDb { INSERT OR IGNORE INTO thirdparties(thirdparty_desc) VALUES (new.fulfiller); - INSERT INTO task_status(id_task, id_thirdparty, id_status, timestamp) - SELECT tmp.id_task, t3p.id_thirdparty, new.id_status, datetime('now') + -- timestamp is auto-filled with datetime('now'), see its field definition + INSERT INTO task_status(id_task, id_thirdparty, id_status) + SELECT tmp.id_task, t3p.id_thirdparty, new.id_status FROM current_task tmp LEFT JOIN thirdparties t3p -- fulfiller can be NULL, for example @@ -721,17 +724,23 @@ impl<'db> TaskManager<'db> { Ok(()) } + /// Returns the latest triplet (submitter or fulfiller, status, last update time) pub fn get_task_proving_status( &mut self, chain_id: ChainId, blockhash: &B256, proof_system: TaskProofsys, - ) -> Result { - let proving_status = self.get_task_proving_status.query_row(named_params! { + ) -> Result, TaskStatus, DateTime)>, TaskManagerError> { + let rows = self.get_task_proving_status.query_map(named_params! 
{ ":chain_id": chain_id as u64, ":blockhash": blockhash.as_slice(), ":id_proofsys": proof_system as u8, - }, |r| r.get::<_, i32>(0).map(TaskStatus::from))?; + }, |row| Ok(( + row.get::<_, Option>(0)?, + TaskStatus::from(row.get::<_, i32>(1)?), + row.get::<_, DateTime>(2)?, + )))?; + let proving_status = rows.collect::, _>>()?; Ok(proving_status) } diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 2da68b46a..b0adf34c6 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -7,6 +7,8 @@ #[cfg(test)] mod tests { + use std::time::Duration; + use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; @@ -164,112 +166,237 @@ mod tests { &payload, ).unwrap(); - assert_eq!( - TaskStatus::Registered, - tama.get_task_proving_status(chain_id, &blockhash, proofsys).unwrap() - ); + let task_status = tama.get_task_proving_status(chain_id, &blockhash, proofsys).unwrap(); + assert_eq!(task_status.len(), 1); + assert_eq!(task_status[0].0, Some(submitter.clone())); + assert_eq!(task_status[0].1, TaskStatus::Registered); tasks.push(( chain_id, blockhash, proofsys, + submitter, )); } - tama.update_task_progress( - tasks[0].0, - &tasks[0].1, - tasks[0].2, - None, - TaskStatus::Cancelled_NeverStarted, - None - ).unwrap(); + std::thread::sleep(Duration::from_millis(1)); - assert_eq!( - TaskStatus::Cancelled_NeverStarted, - tama.get_task_proving_status(tasks[0].0, &tasks[0].1, tasks[0].2).unwrap() - ); + { + tama.update_task_progress( + tasks[0].0, + &tasks[0].1, + tasks[0].2, + None, + TaskStatus::Cancelled_NeverStarted, + None + ).unwrap(); + { + let task_status = tama.get_task_proving_status(tasks[0].0, &tasks[0].1, tasks[0].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, None); + assert_eq!(task_status[0].1, TaskStatus::Cancelled_NeverStarted); + assert_eq!(task_status[1].0, Some(tasks[0].3.clone())); + assert_eq!(task_status[1].1, TaskStatus::Registered); + } + } // ----------------------- + { + tama.update_task_progress( + tasks[1].0, + &tasks[1].1, + tasks[1].2, + Some("A prover Network"), + TaskStatus::WorkInProgress, + None + ).unwrap(); - tama.update_task_progress( - tasks[1].0, - &tasks[1].1, - tasks[1].2, - Some("A prover Network"), - TaskStatus::WorkInProgress, - None - ).unwrap(); + { + let task_status = tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); + assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, Some(tasks[1].3.clone())); + assert_eq!(task_status[1].1, TaskStatus::Registered); + } + + std::thread::sleep(Duration::from_millis(1)); + + tama.update_task_progress( + tasks[1].0, + &tasks[1].1, + tasks[1].2, + Some("A prover Network"), + TaskStatus::CancellationInProgress, + None + ).unwrap(); - assert_eq!( - TaskStatus::WorkInProgress, - tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap() - ); - - tama.update_task_progress( - tasks[1].0, - &tasks[1].1, - tasks[1].2, - Some("A prover Network"), - TaskStatus::CancellationInProgress, - None - ).unwrap(); + { + let task_status = tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); + assert_eq!(task_status[0].1, TaskStatus::CancellationInProgress); + assert_eq!(task_status[1].0, Some(tasks[1].3.clone())); + assert_eq!(task_status[1].1, 
TaskStatus::Registered); + } + + std::thread::sleep(Duration::from_millis(1)); + + tama.update_task_progress( + tasks[1].0, + &tasks[1].1, + tasks[1].2, + Some("A prover Network"), + TaskStatus::Cancelled, + None + ).unwrap(); - assert_eq!( - TaskStatus::CancellationInProgress, - tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap() - ); - - tama.update_task_progress( - tasks[1].0, - &tasks[1].1, - tasks[1].2, - Some("A prover Network"), - TaskStatus::Cancelled, - None - ).unwrap(); + { + let task_status = tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); + assert_eq!(task_status[0].1, TaskStatus::Cancelled); + assert_eq!(task_status[1].0, Some(tasks[1].3.clone())); + assert_eq!(task_status[1].1, TaskStatus::Registered); + } + } + + // ----------------------- + { + tama.update_task_progress( + tasks[2].0, + &tasks[2].1, + tasks[2].2, + Some("A based prover"), + TaskStatus::WorkInProgress, + None + ).unwrap(); - assert_eq!( - TaskStatus::Cancelled, - tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap() - ); + { + let task_status = tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, Some(String::from("A based prover"))); + assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, Some(tasks[2].3.clone())); + assert_eq!(task_status[1].1, TaskStatus::Registered); + } + + std::thread::sleep(Duration::from_millis(1)); + + let proof: Vec<_> = (&mut rng).gen_iter::().take(128).collect(); + tama.update_task_progress( + tasks[2].0, + &tasks[2].1, + tasks[2].2, + Some("A based prover"), + TaskStatus::Success, + Some(&proof) + ).unwrap(); + + { + let task_status = tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, Some(String::from("A based prover"))); + assert_eq!(task_status[0].1, TaskStatus::Success); + assert_eq!(task_status[1].0, Some(tasks[2].3.clone())); + assert_eq!(task_status[1].1, TaskStatus::Registered); + } + + assert_eq!( + proof, + tama.get_task_proof(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() + ); + } // ----------------------- + { + tama.update_task_progress( + tasks[3].0, + &tasks[3].1, + tasks[3].2, + Some("A flaky prover"), + TaskStatus::WorkInProgress, + None + ).unwrap(); - tama.update_task_progress( - tasks[2].0, - &tasks[2].1, - tasks[2].2, - Some("A based prover"), - TaskStatus::WorkInProgress, - None - ).unwrap(); + { + let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, Some(String::from("A flaky prover"))); + assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, Some(tasks[3].3.clone())); + assert_eq!(task_status[1].1, TaskStatus::Registered); + } + + std::thread::sleep(Duration::from_millis(1)); + + tama.update_task_progress( + tasks[3].0, + &tasks[3].1, + tasks[3].2, + Some("A flaky prover"), + TaskStatus::NetworkFailure, + None + ).unwrap(); - assert_eq!( - TaskStatus::WorkInProgress, - tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() - ); - - let proof: Vec<_> = (&mut rng).gen_iter::().take(128).collect(); - tama.update_task_progress( - tasks[2].0, - &tasks[2].1, - tasks[2].2, - Some("A based prover"), - 
TaskStatus::Success, - Some(&proof) - ).unwrap(); + { + let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, Some(String::from("A flaky prover"))); + assert_eq!(task_status[0].1, TaskStatus::NetworkFailure); + assert_eq!(task_status[1].0, Some(tasks[3].3.clone())); + assert_eq!(task_status[1].1, TaskStatus::Registered); + } + + std::thread::sleep(Duration::from_millis(1)); + + tama.update_task_progress( + tasks[3].0, + &tasks[3].1, + tasks[3].2, + Some("A based prover"), + TaskStatus::WorkInProgress, + None + ).unwrap(); - assert_eq!( - TaskStatus::Success, - tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() - ); + { + let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + assert_eq!(task_status.len(), 3); + assert_eq!(task_status[0].0, Some(String::from("A based prover"))); + assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, Some(String::from("A flaky prover"))); + assert_eq!(task_status[1].1, TaskStatus::NetworkFailure); + assert_eq!(task_status[2].0, Some(tasks[3].3.clone())); + assert_eq!(task_status[2].1, TaskStatus::Registered); + } + + std::thread::sleep(Duration::from_millis(1)); + + let proof: Vec<_> = (&mut rng).gen_iter::().take(128).collect(); + tama.update_task_progress( + tasks[3].0, + &tasks[3].1, + tasks[3].2, + Some("A based prover"), + TaskStatus::Success, + Some(&proof) + ).unwrap(); - assert_eq!( - proof, - tama.get_task_proof(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() - ); + { + let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + assert_eq!(task_status.len(), 3); + assert_eq!(task_status[0].0, Some(String::from("A based prover"))); + assert_eq!(task_status[0].1, TaskStatus::Success); + assert_eq!(task_status[1].0, Some(String::from("A flaky prover"))); + assert_eq!(task_status[1].1, TaskStatus::NetworkFailure); + assert_eq!(task_status[2].0, Some(tasks[3].3.clone())); + assert_eq!(task_status[2].1, TaskStatus::Registered); + } + assert_eq!( + proof, + tama.get_task_proof(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap() + ); + } } - } From 2099196bbdfa57c882910d601202a3282af29050 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 20 May 2024 15:28:54 +0200 Subject: [PATCH 12/44] chore(task_manager): Run cargo fmt --- task_manager/src/lib.rs | 83 +++++++++++++++---------- task_manager/tests/main.rs | 123 +++++++++++++++++++++++-------------- 2 files changed, 127 insertions(+), 79 deletions(-) diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index ccd511621..f3675802d 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -160,11 +160,11 @@ use std::path::Path; use raiko_primitives::{BlockNumber, ChainId, B256}; -use rusqlite::{named_params, Statement, MappedRows}; +use rusqlite::{named_params, MappedRows, Statement}; use rusqlite::{Connection, OpenFlags}; use chrono::{DateTime, Utc}; -use num_enum::{IntoPrimitive, FromPrimitive}; +use num_enum::{FromPrimitive, IntoPrimitive}; // Types // ---------------------------------------------------------------- @@ -389,7 +389,8 @@ impl TaskDb { FOREIGN KEY(id_status) REFERENCES status_codes(id_status) ); - "#)?; + "#, + )?; Ok(()) } @@ -442,7 +443,8 @@ impl TaskDb { task_status ts on ts.id_task = t.id_task LEFT JOIN task_proofs tpf on tpf.id_task = t.id_task; - "#)?; + "#, + )?; Ok(()) } @@ -575,7 +577,8 @@ impl TaskDb { :chain_id, :blockhash, 
:id_proofsys, :submitter, :block_number, :parent_hash, :state_root, :num_transactions, :gas_used, :payload); - ")?; + ", + )?; let update_task_progress = conn.prepare( " @@ -585,7 +588,8 @@ impl TaskDb { VALUES ( :chain_id, :blockhash, :id_proofsys, :fulfiller, :id_status, :proof); - ")?; + ", + )?; // The requires sqlite to be compiled with dbstat support: // https://www.sqlite.org/dbstat.html @@ -600,7 +604,7 @@ impl TaskDb { FROM dbstat GROUP BY table_name ORDER BY SUM(pgsize) DESC; - " + ", )?; let get_task_proof = conn.prepare( @@ -614,7 +618,8 @@ impl TaskDb { AND t.blockhash = :blockhash AND t.id_proofsys = :id_proofsys LIMIT 1; - ")?; + ", + )?; let get_task_proving_status = conn.prepare( " @@ -636,9 +641,10 @@ impl TaskDb { t3p.id_thirdparty ORDER BY ts.timestamp DESC; - ")?; + ", + )?; - let get_tasks_unfinished = conn.prepare ( + let get_tasks_unfinished = conn.prepare( " SELECT t.chain_id, @@ -663,15 +669,17 @@ impl TaskDb { -- And -9999 Unspecified failure reason? -- For now we return them until we know more of the failure modes ); - ")?; + ", + )?; Ok(TaskManager { - enqueue_task, - update_task_progress, - get_task_proof, - get_task_proving_status, - get_tasks_unfinished, - get_db_size }) + enqueue_task, + update_task_progress, + get_task_proof, + get_task_proving_status, + get_tasks_unfinished, + get_db_size, + }) } } @@ -731,15 +739,20 @@ impl<'db> TaskManager<'db> { blockhash: &B256, proof_system: TaskProofsys, ) -> Result, TaskStatus, DateTime)>, TaskManagerError> { - let rows = self.get_task_proving_status.query_map(named_params! { - ":chain_id": chain_id as u64, - ":blockhash": blockhash.as_slice(), - ":id_proofsys": proof_system as u8, - }, |row| Ok(( - row.get::<_, Option>(0)?, - TaskStatus::from(row.get::<_, i32>(1)?), - row.get::<_, DateTime>(2)?, - )))?; + let rows = self.get_task_proving_status.query_map( + named_params! { + ":chain_id": chain_id as u64, + ":blockhash": blockhash.as_slice(), + ":id_proofsys": proof_system as u8, + }, + |row| { + Ok(( + row.get::<_, Option>(0)?, + TaskStatus::from(row.get::<_, i32>(1)?), + row.get::<_, DateTime>(2)?, + )) + }, + )?; let proving_status = rows.collect::, _>>()?; Ok(proving_status) @@ -751,23 +764,27 @@ impl<'db> TaskManager<'db> { blockhash: &B256, proof_system: TaskProofsys, ) -> Result, TaskManagerError> { - let proof = self.get_task_proof.query_row(named_params! { - ":chain_id": chain_id as u64, - ":blockhash": blockhash.as_slice(), - ":id_proofsys": proof_system as u8, - }, |r| r.get(0))?; + let proof = self.get_task_proof.query_row( + named_params! 
{ + ":chain_id": chain_id as u64, + ":blockhash": blockhash.as_slice(), + ":id_proofsys": proof_system as u8, + }, + |r| r.get(0), + )?; Ok(proof) } /// Returns the total and detailed database size pub fn get_db_size(&mut self) -> Result<(usize, Vec<(String, usize)>), TaskManagerError> { - let rows = self.get_db_size.query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; + let rows = self + .get_db_size + .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; let details = rows.collect::, _>>()?; let total = details.iter().fold(0, |acc, item| acc + item.1); Ok((total, details)) } - } #[cfg(test)] diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index b0adf34c6..50ff0422e 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -17,7 +17,6 @@ mod tests { #[test] fn test_enqueue_task() { - // // Materialized local DB // let dir = std::env::current_dir().unwrap().join("tests"); // let file = dir.as_path().join("test_enqueue_task.sqlite"); @@ -60,12 +59,12 @@ mod tests { num_transactions, gas_used, &payload, - ).unwrap(); + ) + .unwrap(); } #[test] fn test_get_db_size() { - // Materialized local DB let dir = std::env::current_dir().unwrap().join("tests"); let file = dir.as_path().join("test_get_db_size.sqlite"); @@ -109,7 +108,8 @@ mod tests { num_transactions, gas_used, &payload, - ).unwrap(); + ) + .unwrap(); } let (db_size, db_tables_size) = tama.get_db_size().unwrap(); @@ -119,10 +119,11 @@ mod tests { #[test] fn test_update_query_tasks_progress() { - // Materialized local DB let dir = std::env::current_dir().unwrap().join("tests"); - let file = dir.as_path().join("test_update_query_tasks_progress.sqlite"); + let file = dir + .as_path() + .join("test_update_query_tasks_progress.sqlite"); if file.exists() { std::fs::remove_file(&file).unwrap() }; @@ -164,19 +165,17 @@ mod tests { num_transactions, gas_used, &payload, - ).unwrap(); + ) + .unwrap(); - let task_status = tama.get_task_proving_status(chain_id, &blockhash, proofsys).unwrap(); + let task_status = tama + .get_task_proving_status(chain_id, &blockhash, proofsys) + .unwrap(); assert_eq!(task_status.len(), 1); assert_eq!(task_status[0].0, Some(submitter.clone())); assert_eq!(task_status[0].1, TaskStatus::Registered); - tasks.push(( - chain_id, - blockhash, - proofsys, - submitter, - )); + tasks.push((chain_id, blockhash, proofsys, submitter)); } std::thread::sleep(Duration::from_millis(1)); @@ -188,11 +187,14 @@ mod tests { tasks[0].2, None, TaskStatus::Cancelled_NeverStarted, - None - ).unwrap(); + None, + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[0].0, &tasks[0].1, tasks[0].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[0].0, &tasks[0].1, tasks[0].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, None); assert_eq!(task_status[0].1, TaskStatus::Cancelled_NeverStarted); @@ -208,11 +210,14 @@ mod tests { tasks[1].2, Some("A prover Network"), TaskStatus::WorkInProgress, - None - ).unwrap(); + None, + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); @@ -228,11 +233,14 @@ mod tests { tasks[1].2, Some("A prover Network"), TaskStatus::CancellationInProgress, - None - ).unwrap(); + None, + ) + 
.unwrap(); { - let task_status = tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); assert_eq!(task_status[0].1, TaskStatus::CancellationInProgress); @@ -248,11 +256,14 @@ mod tests { tasks[1].2, Some("A prover Network"), TaskStatus::Cancelled, - None - ).unwrap(); + None, + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); assert_eq!(task_status[0].1, TaskStatus::Cancelled); @@ -269,11 +280,14 @@ mod tests { tasks[2].2, Some("A based prover"), TaskStatus::WorkInProgress, - None - ).unwrap(); + None, + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, Some(String::from("A based prover"))); assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); @@ -290,11 +304,14 @@ mod tests { tasks[2].2, Some("A based prover"), TaskStatus::Success, - Some(&proof) - ).unwrap(); + Some(&proof), + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, Some(String::from("A based prover"))); assert_eq!(task_status[0].1, TaskStatus::Success); @@ -304,7 +321,8 @@ mod tests { assert_eq!( proof, - tama.get_task_proof(tasks[2].0, &tasks[2].1, tasks[2].2).unwrap() + tama.get_task_proof(tasks[2].0, &tasks[2].1, tasks[2].2) + .unwrap() ); } @@ -316,11 +334,14 @@ mod tests { tasks[3].2, Some("A flaky prover"), TaskStatus::WorkInProgress, - None - ).unwrap(); + None, + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, Some(String::from("A flaky prover"))); assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); @@ -336,11 +357,14 @@ mod tests { tasks[3].2, Some("A flaky prover"), TaskStatus::NetworkFailure, - None - ).unwrap(); + None, + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, Some(String::from("A flaky prover"))); assert_eq!(task_status[0].1, TaskStatus::NetworkFailure); @@ -356,11 +380,14 @@ mod tests { tasks[3].2, Some("A based prover"), TaskStatus::WorkInProgress, - None - ).unwrap(); + None, + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[0].0, Some(String::from("A based prover"))); 
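// Note: the trail is returned most-recent-first (ORDER BY ts.timestamp DESC
// in the underlying query), so index 0 is always the latest event.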
assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); @@ -379,11 +406,14 @@ mod tests { tasks[3].2, Some("A based prover"), TaskStatus::Success, - Some(&proof) - ).unwrap(); + Some(&proof), + ) + .unwrap(); { - let task_status = tama.get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap(); + let task_status = tama + .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[0].0, Some(String::from("A based prover"))); assert_eq!(task_status[0].1, TaskStatus::Success); @@ -395,7 +425,8 @@ mod tests { assert_eq!( proof, - tama.get_task_proof(tasks[3].0, &tasks[3].1, tasks[3].2).unwrap() + tama.get_task_proof(tasks[3].0, &tasks[3].1, tasks[3].2) + .unwrap() ); } } From 2c4add191e1cffe28d0aba7678c833bf4f5791c5 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Thu, 13 Jun 2024 10:32:46 +0200 Subject: [PATCH 13/44] feat: address small lints --- Cargo.lock | 1755 ++++++++++++++----------------- Cargo.toml | 3 +- core/src/lib.rs | 2 +- core/src/prover.rs | 13 +- harness/core/src/assert.rs | 4 + host/src/server/api/v1/proof.rs | 4 +- lib/src/consts.rs | 16 +- task_manager/Cargo.toml | 8 +- task_manager/src/lib.rs | 54 +- task_manager/tests/main.rs | 62 +- 10 files changed, 891 insertions(+), 1030 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac4541046..0f07f9dfd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,22 +31,13 @@ checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "cpp_demangle", "fallible-iterator", - "gimli 0.28.1", + "gimli", "memmap2", - "object 0.32.2", + "object", "rustc-demangle", "smallvec", ] -[[package]] -name = "addr2line" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" -dependencies = [ - "gimli 0.29.0", -] - [[package]] name = "adler" version = "1.0.2" @@ -59,17 +50,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - [[package]] name = "ahash" version = "0.8.11" @@ -79,7 +59,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.34", ] [[package]] @@ -112,20 +92,51 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "alloy" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-contract 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-core", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-provider 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-rpc-client 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-signer 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-signer-wallet 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + 
"alloy-transport-http 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "reqwest 0.12.4", +] + [[package]] name = "alloy-consensus" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "c-kzg", "serde", "sha2", ] +[[package]] +name = "alloy-consensus" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "serde", + "sha2", +] + [[package]] name = "alloy-contract" version = "0.1.0" @@ -133,22 +144,52 @@ source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208 dependencies = [ "alloy-dyn-abi", "alloy-json-abi", - "alloy-network", + "alloy-network 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-primitives", + "alloy-provider 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-sol-types", + "alloy-transport 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "futures", + "futures-util", + "thiserror", +] + +[[package]] +name = "alloy-contract" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "alloy-primitives", - "alloy-provider", - "alloy-rpc-types", + "alloy-provider 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "alloy-sol-types", - "alloy-transport", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "futures", "futures-util", "thiserror", ] +[[package]] +name = "alloy-core" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e30b83573b348305b9629a094b5331093a030514cd5713433799495cb283fea1" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-types", +] + [[package]] name = "alloy-dyn-abi" -version = "0.7.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd2404399cb1b50572758e66e9b4bf088e5a3df9007be7126456c7e50af935f" +checksum = "545885d9b0b2c30fd344ae291439b4bfe59e48dd62fbc862f8503d98088967dc" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -158,7 +199,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.13", + "winnow 0.6.8", ] [[package]] @@ -168,7 +209,20 @@ source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208 dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "c-kzg", + "once_cell", + "serde", +] + +[[package]] +name = "alloy-eips" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + 
"alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "c-kzg", "once_cell", "serde", @@ -180,16 +234,26 @@ version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ "alloy-primitives", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "serde", "serde_json", ] +[[package]] +name = "alloy-genesis" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-primitives", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "serde", +] + [[package]] name = "alloy-json-abi" -version = "0.7.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c3abf6446a292e19853aaca43590eeb48bf435dfd2c74200259e8f4872f6ce3" +checksum = "786689872ec4e7d354810ab0dffd48bb40b838c047522eb031cbd47d15634849" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -209,40 +273,69 @@ dependencies = [ "tracing", ] +[[package]] +name = "alloy-json-rpc" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "alloy-network" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-json-rpc", + "alloy-consensus 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-eips 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-json-rpc 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", - "alloy-rpc-types", - "alloy-signer", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-signer 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-sol-types", "async-trait", "futures-utils-wasm", "thiserror", ] +[[package]] +name = "alloy-network" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-signer 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "async-trait", + "futures-utils-wasm", + "serde", + "thiserror", +] + [[package]] name = "alloy-primitives" -version = "0.7.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5277af0cbcc483ee6ad2c1e818090b5928d27f04fd6580680f31c1cf8068bcc2" +checksum = "525448f6afc1b70dd0f9d0a8145631bf2f5e434678ab23ab18409ca264cae6b3" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", "derive_more", + "getrandom", "hex-literal", "itoa", "k256", "keccak-asm", "proptest", - "rand", + "rand 0.8.5", "ruint", "serde", "tiny-keccak", @@ -253,15 +346,15 @@ name = "alloy-provider" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ - "alloy-eips", - "alloy-json-rpc", - 
"alloy-network", + "alloy-eips 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-json-rpc 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-network 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", - "alloy-rpc-client", - "alloy-rpc-types", - "alloy-rpc-types-trace", - "alloy-transport", - "alloy-transport-http", + "alloy-rpc-client 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-rpc-types-trace 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-transport 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-transport-http 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "async-stream", "async-trait", "auto_impl", @@ -276,11 +369,37 @@ dependencies = [ "url", ] +[[package]] +name = "alloy-provider" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-network 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-primitives", + "alloy-rpc-client 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-rpc-types-trace 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-transport-http 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "lru", + "reqwest 0.12.4", + "serde_json", + "tokio", + "tracing", + "url", +] + [[package]] name = "alloy-rlp" -version = "0.3.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b155716bab55763c95ba212806cf43d05bcc70e5f35b02bad20cf5ec7fe11fed" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -289,13 +408,13 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" +checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -303,9 +422,29 @@ name = "alloy-rpc-client" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ - "alloy-json-rpc", - "alloy-transport", - "alloy-transport-http", + "alloy-json-rpc 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-transport 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-transport-http 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "futures", + "pin-project", + "reqwest 0.12.4", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", + "url", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + 
"alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-transport-http 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "futures", "pin-project", "reqwest 0.12.4", @@ -323,12 +462,30 @@ name = "alloy-rpc-types" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", + "alloy-consensus 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-eips 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-genesis 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-sol-types", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "alloy-sol-types", "itertools 0.12.1", "serde", @@ -342,8 +499,20 @@ version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ "alloy-primitives", - "alloy-rpc-types", - "alloy-serde", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-serde 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-rpc-types-trace" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "serde", "serde_json", ] @@ -358,6 +527,16 @@ dependencies = [ "serde_json", ] +[[package]] +name = "alloy-serde" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + [[package]] name = "alloy-signer" version = "0.1.0" @@ -371,83 +550,101 @@ dependencies = [ "thiserror", ] +[[package]] +name = "alloy-signer" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve", + "k256", + "thiserror", +] + [[package]] name = "alloy-signer-wallet" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ - "alloy-consensus", - "alloy-network", + "alloy-consensus 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-network 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", - "alloy-signer", + "alloy-signer 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "async-trait", "k256", - "rand", + "rand 0.8.5", "thiserror", ] 
[[package]] -name = "alloy-sol-macro" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30708a79919b082f2692423c8cc72fc250477e4a2ecb0d4a7244cd3cdb299965" +name = "alloy-signer-wallet" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" dependencies = [ - "alloy-sol-macro-expander", - "alloy-sol-macro-input", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.66", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-network 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-primitives", + "alloy-signer 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "async-trait", + "k256", + "rand 0.8.5", + "thiserror", ] [[package]] -name = "alloy-sol-macro-expander" -version = "0.7.5" +name = "alloy-sol-macro" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c7a679ac01774ab7e00a567a918d4231ae692c5c8cedaf4e16956c3116d7896" +checksum = "89c80a2cb97e7aa48611cbb63950336f9824a174cdf670527cc6465078a26ea1" dependencies = [ + "alloy-json-abi", "alloy-sol-macro-input", "const-hex", - "heck 0.5.0", + "heck 0.4.1", "indexmap 2.2.6", "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.7.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356da0c2228aa6675a5faaa08a3e4061b967f924753983d72b9a18d9a3fad44e" +checksum = "c58894b58ac50979eeac6249661991ac40b9d541830d9a725f7714cc9ef08c23" dependencies = [ + "alloy-json-abi", "const-hex", "dunce", "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.66", + "serde_json", + "syn 2.0.63", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.7.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fd4783b0a5840479013e9ce960d2eb7b3be381f722e0fe3d1f7c3bb6bd4ebd" +checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ - "winnow 0.6.13", + "winnow 0.6.8", ] [[package]] name = "alloy-sol-types" -version = "0.7.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eb5e6234c0b62514992589fe1578f64d418dbc8ef5cd1ab2d7f2f568f599698" +checksum = "399287f68d1081ed8b1f4903c49687658b95b142207d7cb4ae2f4813915343ef" dependencies = [ + "alloy-json-abi", "alloy-primitives", "alloy-sol-macro", "const-hex", @@ -459,7 +656,25 @@ name = "alloy-transport" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "alloy-transport" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "base64 0.22.1", "futures-util", "futures-utils-wasm", @@ -477,8 +692,21 @@ name = "alloy-transport-http" version = "0.1.0" source = "git+https://github.com/brechtpd/alloy?branch=175_4e22b9e#5f972199a8208969e838203c3db48f467c629d49" dependencies = 
[ - "alloy-json-rpc", - "alloy-transport", + "alloy-json-rpc 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-transport 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "reqwest 0.12.4", + "serde_json", + "tower", + "url", +] + +[[package]] +name = "alloy-transport-http" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=bfd0fda#bfd0fda492e560c3463d521958793c81bbeadfc1" +dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=bfd0fda)", "reqwest 0.12.4", "serde_json", "tower", @@ -541,9 +769,9 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] @@ -560,9 +788,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3" [[package]] name = "approx" @@ -794,7 +1022,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -804,9 +1032,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] +[[package]] +name = "array-macro" +version = "2.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "220a2c618ab466efe41d0eace94dfeff1c35e3aa47891bdb95e1c0fefffd3c99" + [[package]] name = "arrayref" version = "0.3.7" @@ -836,9 +1070,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.11" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +checksum = "9c90a406b4495d129f00461241616194cb8a032c8d1c53c657f0961d5f8e0498" dependencies = [ "brotli", "flate2", @@ -869,7 +1103,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -880,7 +1114,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -894,12 +1128,6 @@ dependencies = [ "rustc_version 0.4.0", ] -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - [[package]] name = "atty" version = "0.2.14" @@ -929,7 +1157,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -949,9 +1177,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.5" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" dependencies = [ "async-trait", "axum-core", @@ -974,7 +1202,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper", "tokio", "tower", "tower-layer", @@ -997,7 +1225,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -1012,21 +1240,21 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] name = "backtrace" -version = "0.3.72" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ - "addr2line 0.22.0", + "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", - "object 0.35.0", + "object", "rustc-demangle", "serde", ] @@ -1071,12 +1299,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "bech32" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" - [[package]] name = "bincode" version = "1.3.3" @@ -1086,29 +1308,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.69.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" -dependencies = [ - "bitflags 2.5.0", - "cexpr", - "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", - "log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.66", - "which", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -1186,6 +1385,11 @@ dependencies = [ "rayon", ] +[[package]] +name = "blake3-zkvm" +version = "0.1.0" +source = "git+https://github.com/sp1-patches/BLAKE3.git?branch=patch-blake3_zkvm/v.1.0.0#bac2d59f9122b07a4d91475560b4c3214ae62444" + [[package]] name = "block" version = "0.1.6" @@ -1210,15 +1414,15 @@ dependencies = [ "ff 0.12.1", "group 0.12.1", "pairing", - "rand_core", + "rand_core 0.6.4", "subtle", ] [[package]] name = "blst" -version = "0.3.12" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62dc83a094a71d43eeadd254b1ec2d24cb6a0bb6cadce00df51f0db594711a32" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" dependencies = [ "cc", "glob", @@ -1252,24 +1456,14 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "e6221fe77a248b9117d431ad93761222e1cf8ff282d9d1d5d9f53d6299a1cf76" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] -[[package]] -name = "bs58" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" -dependencies = [ - "sha2", - "tinyvec", -] - [[package]] name = "bstr" version = "1.9.1" @@ -1295,22 +1489,22 @@ checksum = 
"c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.16.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.7.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" +checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -1345,7 +1539,7 @@ dependencies = [ [[package]] name = "c-kzg-taiko" version = "1.0.0" -source = "git+https://github.com/smtmfft/c-kzg-4844?branch=for-alpha7#a2d3ae768eede8228920619c98c87584ad8afd09" +source = "git+https://github.com/smtmfft/c-kzg-4844?branch=for-alpha7#77e9ba0a65e10e6a470832da2914b17a968da791" dependencies = [ "blst", "cc", @@ -1357,9 +1551,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.7" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" dependencies = [ "serde", ] @@ -1421,24 +1615,15 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.99" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" dependencies = [ "jobserver", "libc", "once_cell", ] -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - [[package]] name = "cfg-if" version = "1.0.0" @@ -1458,27 +1643,6 @@ dependencies = [ "windows-targets 0.52.5", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "2.34.0" @@ -1496,9 +1660,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.6" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -1506,9 +1670,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.6" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -1518,73 +1682,21 @@ dependencies = [ [[package]] 
name = "clap_derive" -version = "4.5.5" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] name = "clap_lex" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" - -[[package]] -name = "coins-bip32" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" -dependencies = [ - "bs58", - "coins-core", - "digest 0.10.7", - "hmac", - "k256", - "serde", - "sha2", - "thiserror", -] - -[[package]] -name = "coins-bip39" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8fba409ce3dc04f7d804074039eb68b960b0829161f8e06c95fea3f122528" -dependencies = [ - "bitvec", - "coins-bip32", - "hmac", - "once_cell", - "pbkdf2 0.12.2", - "rand", - "sha2", - "thiserror", -] - -[[package]] -name = "coins-core" -version = "0.8.7" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" -dependencies = [ - "base64 0.21.7", - "bech32", - "bs58", - "digest 0.10.7", - "generic-array 0.14.7", - "hex", - "ripemd", - "serde", - "serde_derive", - "sha2", - "sha3", - "thiserror", -] +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "colorchoice" @@ -1607,9 +1719,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" dependencies = [ "cfg-if", "cpufeatures", @@ -1692,18 +1804,31 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -1727,11 +1852,20 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1746,7 +1880,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array 0.14.7", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1761,15 +1895,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - [[package]] name = "curve25519-dalek" version = "4.1.2" @@ -1794,7 +1919,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -1845,9 +1970,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.9" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" dependencies = [ "darling_core", "darling_macro", @@ -1855,27 +1980,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.9" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.11.1", - "syn 2.0.66", + "strsim 0.10.0", + "syn 2.0.63", ] [[package]] name = "darling_macro" -version = "0.20.9" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -2010,6 +2135,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "dotenv" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" + [[package]] name = "downcast-rs" version = "1.2.1" @@ -2024,7 +2155,7 @@ checksum = "d05213e96f184578b5f70105d4d0a644a168e99e12d7bea0b200c15d67b5c182" dependencies = [ "digest 0.10.7", "futures", - "rand", + "rand 0.8.5", "reqwest 0.11.27", "thiserror", "tokio", @@ -2058,9 +2189,9 @@ dependencies = [ [[package]] name = "either" -version = "1.12.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "elf" @@ -2081,7 +2212,7 @@ dependencies = [ "generic-array 0.14.7", "group 0.13.0", "pkcs8", - "rand_core", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", @@ -2113,25 +2244,7 @@ dependencies = [ "hex", "k256", "log", - "rand", - "rlp", - "serde", - "sha3", - "zeroize", -] - -[[package]] 
-name = "enr" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" -dependencies = [ - "base64 0.21.7", - "bytes", - "hex", - "k256", - "log", - "rand", + "rand 0.8.5", "rlp", "serde", "sha3", @@ -2146,7 +2259,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -2180,12 +2293,11 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.4.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d" +checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" dependencies = [ "serde", - "typeid", ] [[package]] @@ -2198,28 +2310,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "eth-keystore" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" -dependencies = [ - "aes", - "ctr", - "digest 0.10.7", - "hex", - "hmac", - "pbkdf2 0.11.0", - "rand", - "scrypt", - "serde", - "serde_json", - "sha2", - "sha3", - "thiserror", - "uuid 0.8.2", -] - [[package]] name = "ethabi" version = "18.0.0" @@ -2268,61 +2358,16 @@ dependencies = [ "uint", ] -[[package]] -name = "ethers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816841ea989f0c69e459af1cf23a6b0033b19a55424a1ea3a30099becdb8dec0" -dependencies = [ - "ethers-addressbook", - "ethers-contract 2.0.14", - "ethers-core 2.0.14", - "ethers-middleware", - "ethers-providers 2.0.14", - "ethers-signers", -] - -[[package]] -name = "ethers-addressbook" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759" -dependencies = [ - "ethers-core 2.0.14", - "once_cell", - "serde", - "serde_json", -] - [[package]] name = "ethers-contract" version = "2.0.10" source = "git+https://github.com/smtmfft/ethers-rs?branch=ethers-core-2.0.10#37493be6cd912dfe64a9036932dd6da8e13679ce" dependencies = [ "const-hex", - "ethers-contract-abigen 2.0.10", - "ethers-contract-derive 2.0.10", - "ethers-core 2.0.10", - "ethers-providers 2.0.10", - "futures-util", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ethers-contract" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa" -dependencies = [ - "const-hex", - "ethers-contract-abigen 2.0.14", - "ethers-contract-derive 2.0.14", - "ethers-core 2.0.14", - "ethers-providers 2.0.14", + "ethers-contract-abigen", + "ethers-contract-derive", + "ethers-core", + "ethers-providers", "futures-util", "once_cell", "pin-project", @@ -2339,7 +2384,7 @@ dependencies = [ "Inflector", "const-hex", "dunce", - "ethers-core 2.0.10", + "ethers-core", "eyre", "prettyplease", "proc-macro2", @@ -2347,30 +2392,8 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.66", - "toml 0.7.8", - "walkdir", -] - -[[package]] -name = "ethers-contract-abigen" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b" -dependencies = [ - "Inflector", - "const-hex", - "dunce", - "ethers-core 2.0.14", - "eyre", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "serde", - "serde_json", - "syn 2.0.66", - "toml 0.8.14", + "syn 2.0.63", + "toml", "walkdir", ] @@ -2381,28 +2404,12 @@ source = "git+https://github.com/smtmfft/ethers-rs?branch=ethers-core-2.0.10#374 dependencies = [ "Inflector", "const-hex", - "ethers-contract-abigen 2.0.10", - "ethers-core 2.0.10", - "proc-macro2", - "quote", - "serde_json", - "syn 2.0.66", -] - -[[package]] -name = "ethers-contract-derive" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f" -dependencies = [ - "Inflector", - "const-hex", - "ethers-contract-abigen 2.0.14", - "ethers-core 2.0.14", + "ethers-contract-abigen", + "ethers-core", "proc-macro2", "quote", "serde_json", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -2422,74 +2429,18 @@ dependencies = [ "num_enum 0.7.2", "once_cell", "open-fastrlp", - "rand", + "rand 0.8.5", "rlp", "serde", "serde_json", "strum 0.25.0", - "syn 2.0.66", + "syn 2.0.63", "tempfile", "thiserror", "tiny-keccak", "unicode-xid", ] -[[package]] -name = "ethers-core" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" -dependencies = [ - "arrayvec", - "bytes", - "cargo_metadata 0.18.1", - "chrono", - "const-hex", - "elliptic-curve", - "ethabi", - "generic-array 0.14.7", - "k256", - "num_enum 0.7.2", - "once_cell", - "open-fastrlp", - "rand", - "rlp", - "serde", - "serde_json", - "strum 0.26.2", - "syn 2.0.66", - "tempfile", - "thiserror", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "ethers-middleware" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de" -dependencies = [ - "async-trait", - "auto_impl", - "ethers-contract 2.0.14", - "ethers-core 2.0.14", - "ethers-providers 2.0.14", - "ethers-signers", - "futures-channel", - "futures-locks", - "futures-util", - "instant", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", - "url", -] - [[package]] name = "ethers-providers" version = "2.0.10" @@ -2500,8 +2451,8 @@ dependencies = [ "base64 0.21.7", "bytes", "const-hex", - "enr 0.9.1", - "ethers-core 2.0.10", + "enr", + "ethers-core", "futures-channel", "futures-core", "futures-timer", @@ -2527,61 +2478,6 @@ dependencies = [ "ws_stream_wasm", ] -[[package]] -name = "ethers-providers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2" -dependencies = [ - "async-trait", - "auto_impl", - "base64 0.21.7", - "bytes", - "const-hex", - "enr 0.10.0", - "ethers-core 2.0.14", - "futures-core", - "futures-timer", - "futures-util", - "hashers", - "http 0.2.12", - "instant", - "jsonwebtoken", - "once_cell", - "pin-project", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "ws_stream_wasm", -] - -[[package]] -name = "ethers-signers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2" -dependencies = [ - "async-trait", - "coins-bip32", - "coins-bip39", - "const-hex", - "elliptic-curve", - "eth-keystore", - "ethers-core 2.0.14", - "rand", - "sha2", - "thiserror", - "tracing", -] - [[package]] name = "eyre" version = "0.6.12" @@ -2598,6 +2494,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fastrand" version = "2.1.0" @@ -2622,7 +2524,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ "bitvec", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2635,7 +2537,7 @@ dependencies = [ "bitvec", "byteorder", "ff_derive", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2677,7 +2579,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -2731,7 +2633,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -2809,16 +2711,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" -[[package]] -name = "futures-locks" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" -dependencies = [ - "futures-channel", - "futures-task", -] - [[package]] name = "futures-macro" version = "0.3.30" @@ -2827,7 +2719,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -2933,12 +2825,6 @@ dependencies = [ "stable_deref_trait", ] -[[package]] -name = "gimli" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" - [[package]] name = "git2" version = "0.18.3" @@ -2987,7 +2873,7 @@ checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", "memuse", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2998,7 +2884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3023,15 +2909,15 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" dependencies = [ - "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", + "futures-util", "http 1.1.0", "indexmap 2.2.6", "slab", @@ -3059,7 +2945,7 @@ dependencies = [ "ff 0.12.1", "group 0.12.1", "pasta_curves 0.4.1", - 
"rand_core", + "rand_core 0.6.4", "rayon", ] @@ -3117,6 +3003,15 @@ dependencies = [ "fxhash", ] +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "hdrhistogram" version = "7.5.4" @@ -3284,9 +3179,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -3315,7 +3210,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.4", "http 1.1.0", "http-body 1.0.0", "httparse", @@ -3335,7 +3230,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.28", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -3365,7 +3260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.29", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", @@ -3389,9 +3284,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", "futures-channel", @@ -3525,20 +3420,11 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "instant" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] @@ -3637,7 +3523,7 @@ dependencies = [ "bls12_381", "ff 0.12.1", "group 0.12.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3694,7 +3580,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -3706,17 +3592,11 @@ dependencies = [ "spin 0.5.2", ] -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" -version = "0.2.155" +version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libflate" @@ -3754,16 +3634,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "libloading" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" -dependencies = [ - "cfg-if", - "windows-targets 0.52.5", -] - [[package]] name = "libm" version = "0.2.8" @@ -3780,11 +3650,22 @@ dependencies = [ "libc", ] +[[package]] +name = "libsqlite3-sys" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" -version = "1.1.18" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "libc", @@ -3794,9 +3675,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -3915,17 +3796,11 @@ dependencies = [ "unicase", ] -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - [[package]] name = "miniz_oxide" -version = "0.7.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] @@ -3955,10 +3830,11 @@ checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "native-tls" -version = "0.2.12" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ + "lazy_static", "libc", "log", "openssl", @@ -3990,16 +3866,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -4068,7 +3934,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -4161,7 +4027,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -4209,15 +4075,6 @@ dependencies = [ "ruzstd", ] -[[package]] -name = "object" -version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.19.0" @@ -4272,7 +4129,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -4308,7 
+4165,7 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p3-air" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "p3-field", "p3-matrix", @@ -4317,21 +4174,21 @@ dependencies = [ [[package]] name = "p3-baby-bear" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "num-bigint 0.4.5", "p3-field", "p3-mds", "p3-poseidon2", "p3-symmetric", - "rand", + "rand 0.8.5", "serde", ] [[package]] name = "p3-blake3" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "blake3", "p3-symmetric", @@ -4340,21 +4197,21 @@ dependencies = [ [[package]] name = "p3-bn254-fr" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "ff 0.13.0", "num-bigint 0.4.5", "p3-field", "p3-poseidon2", "p3-symmetric", - "rand", + "rand 0.8.5", "serde", ] [[package]] name = "p3-challenger" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "p3-field", "p3-maybe-rayon", @@ -4366,7 +4223,7 @@ dependencies = [ [[package]] name = "p3-commit" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "p3-challenger", @@ -4379,7 +4236,7 @@ dependencies = [ [[package]] name = "p3-dft" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "p3-field", "p3-matrix", @@ -4391,20 +4248,20 @@ dependencies = [ [[package]] name = "p3-field" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "num-bigint 0.4.5", "num-traits", "p3-util", - "rand", + "rand 0.8.5", "serde", ] [[package]] name = "p3-fri" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = 
"git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "p3-challenger", @@ -4422,7 +4279,7 @@ dependencies = [ [[package]] name = "p3-interpolation" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "p3-field", "p3-matrix", @@ -4432,7 +4289,7 @@ dependencies = [ [[package]] name = "p3-keccak" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "p3-symmetric", "tiny-keccak", @@ -4441,7 +4298,7 @@ dependencies = [ [[package]] name = "p3-keccak-air" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "p3-air", "p3-field", @@ -4454,13 +4311,13 @@ dependencies = [ [[package]] name = "p3-matrix" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "p3-field", "p3-maybe-rayon", "p3-util", - "rand", + "rand 0.8.5", "serde", "tracing", ] @@ -4468,7 +4325,7 @@ dependencies = [ [[package]] name = "p3-maybe-rayon" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "rayon", ] @@ -4476,7 +4333,7 @@ dependencies = [ [[package]] name = "p3-mds" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "p3-dft", @@ -4484,13 +4341,13 @@ dependencies = [ "p3-matrix", "p3-symmetric", "p3-util", - "rand", + "rand 0.8.5", ] [[package]] name = "p3-merkle-tree" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "p3-commit", @@ -4506,19 +4363,19 @@ dependencies = [ [[package]] name = "p3-poseidon2" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "gcd", "p3-field", "p3-mds", "p3-symmetric", - "rand", + "rand 0.8.5", ] [[package]] name = "p3-symmetric" version = "0.1.0" -source = 
"git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "p3-field", @@ -4528,7 +4385,7 @@ dependencies = [ [[package]] name = "p3-uni-stark" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "itertools 0.12.1", "p3-air", @@ -4546,7 +4403,7 @@ dependencies = [ [[package]] name = "p3-util" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3.git?rev=3b5265f9d5af36534a46caebf0617595cfb42c5a#3b5265f9d5af36534a46caebf0617595cfb42c5a" +source = "git+https://github.com/Plonky3/Plonky3.git?branch=sp1#04d4c6e15a0296798331db82e696d29c455bafe1" dependencies = [ "serde", ] @@ -4588,9 +4445,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", "parking_lot_core", @@ -4619,7 +4476,7 @@ dependencies = [ "ff 0.12.1", "group 0.12.1", "lazy_static", - "rand", + "rand 0.8.5", "static_assertions", "subtle", ] @@ -4634,7 +4491,7 @@ dependencies = [ "ff 0.13.0", "group 0.13.0", "lazy_static", - "rand", + "rand 0.8.5", "static_assertions", "subtle", ] @@ -4651,25 +4508,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" -[[package]] -name = "pbkdf2" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "pbkdf2" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" -dependencies = [ - "digest 0.10.7", - "hmac", -] - [[package]] name = "pem" version = "1.1.1" @@ -4743,7 +4581,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -4832,7 +4670,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -4894,9 +4732,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.85" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] @@ -4952,8 +4790,8 @@ dependencies = [ "bitflags 2.5.0", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.8.3", "rusty-fork", @@ -4963,9 +4801,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.6" +version = "0.12.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" dependencies = [ "bytes", "prost-derive", @@ -4973,9 +4811,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.6" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" dependencies = [ "bytes", "heck 0.5.0", @@ -4988,28 +4826,28 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.66", + "syn 2.0.63", "tempfile", ] [[package]] name = "prost-derive" -version = "0.12.6" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "9554e3ab233f0a932403704f1a1d08c30d5ccd931adfdfa1e8b5a19b52c1d55a" dependencies = [ "anyhow", "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] name = "prost-types" -version = "0.12.6" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ "prost", ] @@ -5054,21 +4892,21 @@ checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" name = "raiko-core" version = "0.1.0" dependencies = [ - "alloy-consensus", - "alloy-network", + "alloy-consensus 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-network 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", - "alloy-provider", + "alloy-provider 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-rlp", "alloy-rlp-derive", - "alloy-rpc-client", - "alloy-rpc-types", + "alloy-rpc-client 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-sol-types", - "alloy-transport-http", + "alloy-transport-http 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "anyhow", "assert_cmd", "c-kzg-taiko", - "clap 4.5.6", - "ethers-core 2.0.10", + "clap 4.5.4", + "ethers-core", "raiko-lib", "reqwest 0.11.27", "reqwest 0.12.4", @@ -5090,16 +4928,16 @@ dependencies = [ name = "raiko-host" version = "0.1.0" dependencies = [ - "alloy-consensus", - "alloy-network", + "alloy-consensus 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-network 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", - "alloy-provider", + "alloy-provider 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-rlp", "alloy-rlp-derive", - "alloy-rpc-client", - "alloy-rpc-types", + "alloy-rpc-client 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-sol-types", - "alloy-transport-http", + "alloy-transport-http 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "anyhow", "assert_cmd", "axum", @@ -5108,11 +4946,11 @@ dependencies = [ "c-kzg-taiko", "cap", "cfg-if", - "clap 4.5.6", + "clap 4.5.4", "env_logger", - "ethers-core 2.0.10", + "ethers-core", "flate2", - "hyper 0.14.29", + 
"hyper 0.14.28", "lazy_static", "lru_time_cache", "once_cell", @@ -5149,13 +4987,13 @@ dependencies = [ name = "raiko-lib" version = "0.1.0" dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-network", + "alloy-consensus 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-eips 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-network 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", "alloy-rlp", "alloy-rlp-derive", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-sol-types", "anyhow", "bincode", @@ -5204,28 +5042,28 @@ dependencies = [ name = "raiko-setup" version = "0.1.0" dependencies = [ - "alloy-consensus", - "alloy-network", + "alloy-consensus 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-network 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-primitives", - "alloy-provider", + "alloy-provider 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-rlp", "alloy-rlp-derive", - "alloy-rpc-client", - "alloy-rpc-types", + "alloy-rpc-client 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-rpc-types 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-sol-types", - "alloy-transport-http", + "alloy-transport-http 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "anyhow", "assert_cmd", "bincode", "bytemuck", "cap", "cfg-if", - "clap 4.5.6", + "clap 4.5.4", "dirs", "env_logger", - "ethers-core 2.0.10", + "ethers-core", "flate2", - "hyper 0.14.29", + "hyper 0.14.28", "lazy_static", "lru_time_cache", "once_cell", @@ -5259,8 +5097,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d31e63ea85be51c423e52ba8f2e68a3efd53eed30203ee029dd09947333693e" +dependencies = [ + "rand_chacha 0.9.0-alpha.1", + "rand_core 0.9.0-alpha.1", + "zerocopy 0.8.0-alpha.6", ] [[package]] @@ -5270,7 +5119,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78674ef918c19451dbd250f8201f8619b494f64c9aa6f3adb28fd8a0f1f6da46" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0-alpha.1", ] [[package]] @@ -5282,13 +5141,23 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_core" +version = "0.9.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc89dffba8377c5ec847d12bb41492bda235dba31a25e8b695cd0fe6589eb8c9" +dependencies = [ + "getrandom", + "zerocopy 0.8.0-alpha.6", +] + [[package]] name = "rand_xorshift" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -5410,7 +5279,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 
0.14.28", "hyper-rustls 0.24.2", "hyper-tls 0.5.0", "ipnet", @@ -5426,7 +5295,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -5449,9 +5318,10 @@ dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", + "futures-channel", "futures-core", "futures-util", - "h2 0.4.5", + "h2 0.4.4", "http 1.1.0", "http-body 1.0.0", "http-body-util", @@ -5473,7 +5343,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -5485,7 +5355,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.26.2", + "webpki-roots 0.26.1", "winreg 0.52.0", ] @@ -5659,7 +5529,7 @@ dependencies = [ "downloader", "hex", "metal", - "rand", + "rand 0.8.5", "rayon", "risc0-circuit-recursion-sys", "risc0-core", @@ -5689,7 +5559,7 @@ dependencies = [ "anyhow", "cust", "metal", - "rand", + "rand 0.8.5", "rayon", "risc0-circuit-rv32im-sys", "risc0-core", @@ -5716,7 +5586,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02e0cbd09d03c23b572b66cd96a56143adb22bf895aca89c1a153ccebedaa0b4" dependencies = [ "bytemuck", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -5730,9 +5600,9 @@ dependencies = [ "bonsai-sdk", "bytemuck", "cfg-if", - "ethers-contract 2.0.10", - "ethers-core 2.0.10", - "ethers-providers 2.0.10", + "ethers-contract", + "ethers-core", + "ethers-providers", "hex", "log", "once_cell", @@ -5807,8 +5677,8 @@ dependencies = [ "metal", "ndarray", "paste", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "risc0-core", "risc0-sys", @@ -5824,7 +5694,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a1275834c86176efc122a172c2b5f271a8a5d792de7efbc47dfbecaaaff9432" dependencies = [ - "addr2line 0.21.0", + "addr2line", "anyhow", "bincode", "bonsai-sdk", @@ -5944,15 +5814,15 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.66", + "syn 2.0.63", "unicode-ident", ] [[package]] name = "ruint" -version = "1.12.3" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +checksum = "8f308135fef9fc398342da5472ce7c484529df23743fb7c734e0f3d472971e62" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -5964,7 +5834,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "proptest", - "rand", + "rand 0.8.5", "rlp", "ruint-macro", "serde", @@ -5974,9 +5844,24 @@ dependencies = [ [[package]] name = "ruint-macro" -version = "1.2.1" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" + +[[package]] +name = "rusqlite" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" +dependencies = [ + "bitflags 2.5.0", + "chrono", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] [[package]] name = "rust-embed" @@ -5998,7 +5883,7 @@ dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.66", + "syn 2.0.63", "walkdir", ] @@ -6018,12 +5903,6 @@ version = "0.1.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - [[package]] name = "rustc-hex" version = "2.1.0" @@ -6082,7 +5961,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.3", "subtle", "zeroize", ] @@ -6124,9 +6003,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -6135,9 +6014,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0" [[package]] name = "rusty-fork" @@ -6168,15 +6047,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" -[[package]] -name = "salsa20" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" -dependencies = [ - "cipher", -] - [[package]] name = "same-file" version = "1.0.6" @@ -6234,18 +6104,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "scrypt" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" -dependencies = [ - "hmac", - "pbkdf2 0.11.0", - "salsa20", - "sha2", -] - [[package]] name = "sct" version = "0.7.1" @@ -6282,7 +6140,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" dependencies = [ - "rand", + "rand 0.8.5", "secp256k1-sys 0.8.1", ] @@ -6292,7 +6150,7 @@ version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ - "rand", + "rand 0.8.5", "secp256k1-sys 0.9.2", ] @@ -6379,22 +6237,22 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + 
"syn 2.0.63", ] [[package]] @@ -6421,9 +6279,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -6467,7 +6325,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -6492,7 +6350,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -6503,12 +6361,12 @@ dependencies = [ "base64 0.21.7", "base64-serde", "bincode", - "clap 4.5.6", + "clap 4.5.4", "dirs", "hex", "raiko-lib", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "secp256k1 0.27.0", "serde", "serde_json", @@ -6521,13 +6379,13 @@ dependencies = [ name = "sgx-prover" version = "0.1.0" dependencies = [ - "alloy-contract", - "alloy-provider", - "alloy-rpc-client", - "alloy-signer", - "alloy-signer-wallet", + "alloy-contract 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-provider 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-rpc-client 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-signer 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", + "alloy-signer-wallet 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "alloy-sol-types", - "alloy-transport-http", + "alloy-transport-http 0.1.0 (git+https://github.com/brechtpd/alloy?branch=175_4e22b9e)", "anyhow", "bincode", "once_cell", @@ -6591,12 +6449,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -6613,7 +6465,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -6679,12 +6531,13 @@ dependencies = [ [[package]] name = "sp1-core" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "anyhow", "arrayref", "bincode", "blake3", + "blake3-zkvm", "cfg-if", "curve25519-dalek", "elf", @@ -6696,7 +6549,6 @@ dependencies = [ "log", "nohash-hasher", "num", - "num-bigint 0.4.5", "num_cpus", "p3-air", "p3-baby-bear", @@ -6719,14 +6571,14 @@ dependencies = [ "rrs-lib 0.1.0 (git+https://github.com/GregAC/rrs.git)", "serde", "serde_with", + "serial_test", "size", "snowbridge-amcl", "sp1-derive", "sp1-primitives", "strum 0.26.2", - "strum_macros 0.26.4", + "strum_macros 0.26.2", "tempfile", - "thiserror", "tracing", "tracing-forest", "tracing-subscriber 0.3.18", @@ -6737,7 +6589,7 @@ dependencies = [ [[package]] name = "sp1-derive" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "proc-macro2", 
"quote", @@ -6766,7 +6618,7 @@ dependencies = [ [[package]] name = "sp1-helper" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "cargo_metadata 0.18.1", "chrono", @@ -6775,7 +6627,7 @@ dependencies = [ [[package]] name = "sp1-primitives" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "itertools 0.12.1", "lazy_static", @@ -6788,18 +6640,17 @@ dependencies = [ [[package]] name = "sp1-prover" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "anyhow", "backtrace", "bincode", - "clap 4.5.6", + "clap 4.5.4", "dirs", "futures", "hex", "indicatif", "itertools 0.12.1", - "num-bigint 0.4.5", "p3-baby-bear", "p3-bn254-fr", "p3-challenger", @@ -6821,16 +6672,16 @@ dependencies = [ "sp1-recursion-program", "subtle-encoding", "tempfile", - "thiserror", "tokio", "tracing", + "tracing-appender", "tracing-subscriber 0.3.18", ] [[package]] name = "sp1-recursion-circuit" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "bincode", "itertools 0.12.1", @@ -6853,7 +6704,7 @@ dependencies = [ [[package]] name = "sp1-recursion-compiler" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "backtrace", "itertools 0.12.1", @@ -6868,6 +6719,8 @@ dependencies = [ "p3-symmetric", "p3-util", "serde", + "serde_json", + "serial_test", "sp1-core", "sp1-recursion-core", "sp1-recursion-derive", @@ -6877,7 +6730,7 @@ dependencies = [ [[package]] name = "sp1-recursion-core" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "arrayref", "backtrace", @@ -6910,7 +6763,7 @@ dependencies = [ [[package]] name = "sp1-recursion-derive" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ "proc-macro2", "quote", @@ -6920,27 +6773,26 @@ dependencies = [ [[package]] name = "sp1-recursion-gnark-ffi" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ - "bindgen", - "cc", - "cfg-if", + "crossbeam", "log", - "num-bigint 0.4.5", - "p3-baby-bear", "p3-field", - "rand", + "rand 0.8.5", + "reqwest 0.12.4", "serde", "serde_json", "sp1-recursion-compiler", 
+ "subtle-encoding", "tempfile", ] [[package]] name = "sp1-recursion-program" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ + "array-macro", "itertools 0.12.1", "p3-air", "p3-baby-bear", @@ -6955,7 +6807,7 @@ dependencies = [ "p3-poseidon2", "p3-symmetric", "p3-util", - "rand", + "rand 0.8.5", "serde", "sp1-core", "sp1-recursion-compiler", @@ -6966,25 +6818,24 @@ dependencies = [ [[package]] name = "sp1-sdk" version = "0.1.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=main#8c0f5010606f9c2306effe60412d421c23e88afa" +source = "git+https://github.com/succinctlabs/sp1.git?branch=main#2f57e1e77f7c88396b571bd961d3cbe4b1291c7a" dependencies = [ - "alloy-sol-types", + "alloy", "anyhow", "async-trait", "axum", "bincode", - "cfg-if", "dirs", - "ethers", + "dotenv", "futures", "hex", "indicatif", "log", - "num-bigint 0.4.5", "p3-commit", "p3-field", "p3-matrix", "prost", + "prost-types", "reqwest 0.12.4", "reqwest-middleware", "serde", @@ -6992,8 +6843,6 @@ dependencies = [ "sha2", "sp1-core", "sp1-prover", - "strum 0.26.2", - "strum_macros 0.26.4", "tempfile", "tokio", "tracing", @@ -7041,6 +6890,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "strsim" version = "0.11.1" @@ -7085,9 +6940,6 @@ name = "strum" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" -dependencies = [ - "strum_macros 0.26.4", -] [[package]] name = "strum_macros" @@ -7099,20 +6951,20 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] name = "strum_macros" -version = "0.26.4" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ - "heck 0.5.0", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -7124,7 +6976,7 @@ dependencies = [ "byteorder", "crunchy", "lazy_static", - "rand", + "rand 0.8.5", "rustc-hex", ] @@ -7156,9 +7008,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.66" +version = "2.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704" dependencies = [ "proc-macro2", "quote", @@ -7167,14 +7019,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6fe08d08d84f2c0a77f1e7c46518789d745c2e87a2721791ed7c3c9bc78df28" +checksum = "5aa0cefd02f532035d83cfec82647c6eb53140b0485220760e669f4bad489e36" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -7183,12 +7035,6 @@ version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "sync_wrapper" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" - [[package]] name = "system-configuration" version = "0.5.1" @@ -7216,6 +7062,20 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "task_manager" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "chrono", + "num_enum 0.7.2", + "raiko-lib", + "rand 0.9.0-alpha.1", + "rand_chacha 0.9.0-alpha.1", + "rusqlite", + "tempfile", +] + [[package]] name = "tempfile" version = "3.10.1" @@ -7245,22 +7105,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -7361,9 +7221,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -7380,13 +7240,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -7472,23 +7332,11 @@ dependencies = [ "toml_edit 0.19.15", ] -[[package]] -name = "toml" -version = "0.8.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.14", -] - [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -7517,19 +7365,6 @@ dependencies = [ "winnow 0.5.40", ] -[[package]] -name = "toml_edit" -version = "0.22.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" -dependencies = [ - "indexmap 2.2.6", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.6.13", -] - [[package]] name = "tower" version = 
"0.4.13" @@ -7542,7 +7377,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -7579,7 +7414,7 @@ dependencies = [ "tower-layer", "tower-service", "tracing", - "uuid 1.8.0", + "uuid", ] [[package]] @@ -7626,7 +7461,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -7731,7 +7566,7 @@ dependencies = [ "http 0.2.12", "httparse", "log", - "rand", + "rand 0.8.5", "rustls 0.21.12", "sha1", "thiserror", @@ -7771,12 +7606,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "typeid" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "059d83cc991e7a42fc37bd50941885db0888e34209f8cfd9aab07ddec03bc9cf" - [[package]] name = "typenum" version = "1.17.0" @@ -7804,7 +7633,7 @@ checksum = "ac73887f47b9312552aa90ef477927ff014d63d1920ca8037c6c1951eab64bb1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -7869,9 +7698,9 @@ checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "unicode-xid" @@ -7910,9 +7739,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "utf8parse" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "utoipa" @@ -7936,7 +7765,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -7967,16 +7796,6 @@ dependencies = [ "zip", ] -[[package]] -name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom", - "serde", -] - [[package]] name = "uuid" version = "1.8.0" @@ -8090,7 +7909,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", "wasm-bindgen-shared", ] @@ -8124,7 +7943,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8176,9 +7995,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.2" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c452ad30530b54a4d8e71952716a212b08efd0f3562baa66c29a618b07da7c3" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" dependencies = [ "rustls-pki-types", ] @@ -8385,9 +8204,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.13" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] @@ 
-8446,7 +8265,16 @@ version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ - "zerocopy-derive", + "zerocopy-derive 0.7.34", +] + +[[package]] +name = "zerocopy" +version = "0.8.0-alpha.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db678a6ee512bd06adf35c35be471cae2f9c82a5aed2b5d15e03628c98bddd57" +dependencies = [ + "zerocopy-derive 0.8.0-alpha.6", ] [[package]] @@ -8457,14 +8285,25 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.0-alpha.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "201585ea96d37ee69f2ac769925ca57160cef31acb137c16f38b02b76f4c1e62" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.63", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -8477,7 +8316,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.63", ] [[package]] @@ -8511,7 +8350,7 @@ dependencies = [ "jubjub", "lazy_static", "pasta_curves 0.5.1", - "rand", + "rand 0.8.5", "serde", "sha2", "sha3", diff --git a/Cargo.toml b/Cargo.toml index d09816f10..63b2e027b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ members = [ "provers/sgx/setup", "pipeline", "core", - "task_manager" + "task_manager", ] # Always optimize; building and running the guest takes much longer without optimization. @@ -178,4 +178,3 @@ rand = "0.8.5" rand_core = "0.6.4" dirs = "5.0.1" pathdiff = "0.2.1" - diff --git a/core/src/lib.rs b/core/src/lib.rs index 403cfaf4c..716bf23d5 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -169,7 +169,7 @@ pub fn merge(a: &mut Value, b: &Value) { merge(a.entry(k).or_insert(Value::Null), v); } } - (a, b) if !b.is_null() => *a = b.to_owned(), + (a, b) if !b.is_null() => b.clone_into(a), // If b is null, just keep a (which means do nothing). 
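// A quick sketch of the semantics this arm preserves (illustrative values,
// not from the patch; `merge` as defined in this file). Note a null in `b`
// still inserts a missing key via `or_insert(Value::Null)` — it only
// refuses to overwrite an existing value:
//
//     let mut a = json!({ "x": 1, "nested": { "y": 2 } });
//     let b = json!({ "nested": { "y": 3 }, "z": null });
//     merge(&mut a, &b);
//     assert_eq!(a, json!({ "x": 1, "nested": { "y": 3 }, "z": null }));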
        _ => {}
    }
diff --git a/core/src/prover.rs b/core/src/prover.rs
index 84a1e26b4..188e2dd46 100644
--- a/core/src/prover.rs
+++ b/core/src/prover.rs
@@ -29,12 +29,13 @@ impl Prover for NativeProver {
         output: &GuestOutput,
         config: &ProverConfig,
     ) -> ProverResult<Proof> {
-        let param = config
-            .get("native")
-            .map(|v| NativeParam::deserialize(v))
-            .ok_or(ProverError::Param(serde_json::Error::custom(
-                "native param not provided",
-            )))??;
+        let param =
+            config
+                .get("native")
+                .map(NativeParam::deserialize)
+                .ok_or(ProverError::Param(serde_json::Error::custom(
+                    "native param not provided",
+                )))??;
 
         if let Some(path) = param.write_guest_input_path {
             let path = Path::new(&path);
diff --git a/harness/core/src/assert.rs b/harness/core/src/assert.rs
index a583b8556..e9100e2c6 100644
--- a/harness/core/src/assert.rs
+++ b/harness/core/src/assert.rs
@@ -60,6 +60,10 @@ impl AssertionLog {
         self.assertions.len()
     }
 
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
     pub fn display_failures(&self, start: usize, end: usize) {
         for i in start..end {
             if let Some(assertion) = self.assertions.get(i) {
diff --git a/host/src/server/api/v1/proof.rs b/host/src/server/api/v1/proof.rs
index bdc55faea..a7483e1c2 100644
--- a/host/src/server/api/v1/proof.rs
+++ b/host/src/server/api/v1/proof.rs
@@ -80,7 +80,7 @@ async fn validate_cache_input(
         // double check if cache is valid
         if cached_block_hash == real_block_hash {
-            return Ok(cache_input);
+            Ok(cache_input)
         } else {
             Err(HostError::InvalidRequestConfig(
                 "Cached input is not valid".to_owned(),
@@ -251,7 +251,7 @@ mod test {
         block_number: u64,
     ) -> (GuestInput, RpcBlockDataProvider) {
         let l1_chain_spec = SupportedChainSpecs::default()
-            .get_chain_spec(&l1_network)
+            .get_chain_spec(l1_network)
             .unwrap();
         let taiko_chain_spec = SupportedChainSpecs::default()
             .get_chain_spec(network)
diff --git a/lib/src/consts.rs b/lib/src/consts.rs
index 987821b94..7db4f6342 100644
--- a/lib/src/consts.rs
+++ b/lib/src/consts.rs
@@ -250,14 +250,14 @@ pub enum Network {
     TaikoMainnet,
 }
 
-impl ToString for Network {
-    fn to_string(&self) -> String {
-        match self {
-            Network::Ethereum => "ethereum".to_string(),
-            Network::Holesky => "holesky".to_string(),
-            Network::TaikoA7 => "taiko_a7".to_string(),
-            Network::TaikoMainnet => "taiko_mainnet".to_string(),
-        }
+impl std::fmt::Display for Network {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.write_str(match self {
+            Network::Ethereum => "ethereum",
+            Network::Holesky => "holesky",
+            Network::TaikoA7 => "taiko_a7",
+            Network::TaikoMainnet => "taiko_mainnet",
+        })
     }
 }
 
diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml
index e3052a5e9..2d4bf0a68 100644
--- a/task_manager/Cargo.toml
+++ b/task_manager/Cargo.toml
@@ -2,16 +2,16 @@
 name = "task_manager"
 version = "0.1.0"
 authors = ["Mamy Ratsimbazafy "]
-edition = "2021" # { workspace = true }
+edition = "2021" # { workspace = true }
 
 [dependencies]
-raiko-primitives = { workspace = true }
-rusqlite = { workspace = true, features = ["chrono"] }
+raiko-lib = { workspace = true }
+rusqlite = { workspace = true, features = ["chrono"] }
 num_enum = { workspace = true }
 chrono = { workspace = true }
 
 [dev-dependencies]
-rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::<u8>()
+rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::<u8>()
 rand_chacha = "0.9.0-alpha.1"
 tempfile = "3.10.1"
 alloy-primitives = { workspace = true, features = ["getrandom"] }
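The task_manager diff below folds enqueue_task's ten positional arguments into a single params struct (spelled `EnqueTaskParams` here; a later patch in this series fixes the typo to `EnqueueTaskParams`). A minimal sketch of the resulting call shape, with every value invented purely for illustration:

    // Hypothetical call site against the API introduced below:
    tama.enqueue_task(EnqueTaskParams {
        chain_id: 167_008,
        blockhash: B256::random(),
        proof_system: TaskProofsys::Risc0,
        submitter: "raiko-host".to_owned(),
        block_number: 1_234_567,
        parent_hash: B256::random(),
        state_root: B256::random(),
        num_transactions: 42,
        gas_used: 1_000_000,
        payload: vec![0u8; 64],
    })?;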
diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs
index f3675802d..18f90855b 100644
--- a/task_manager/src/lib.rs
+++ b/task_manager/src/lib.rs
@@ -158,9 +158,9 @@ use std::io::{Error as IOError, ErrorKind as IOErrorKind};
 use std::fs::File;
 use std::path::Path;
 
-use raiko_primitives::{BlockNumber, ChainId, B256};
+use raiko_lib::primitives::{BlockNumber, ChainId, B256};
 
-use rusqlite::{named_params, MappedRows, Statement};
+use rusqlite::{named_params, Statement};
 use rusqlite::{Connection, OpenFlags};
 
 use chrono::{DateTime, Utc};
@@ -198,6 +198,7 @@ pub struct TaskManager<'db> {
     update_task_progress: Statement<'db>,
     get_task_proof: Statement<'db>,
     get_task_proving_status: Statement<'db>,
+    #[allow(dead_code)]
     get_tasks_unfinished: Statement<'db>,
     get_db_size: Statement<'db>,
 }
@@ -457,7 +458,7 @@ impl TaskDb {
         self.conn.trace(trace_fn);
     }
 
-    pub fn manage<'db>(&'db self) -> Result<TaskManager<'db>, TaskManagerError> {
+    pub fn manage(&self) -> Result<TaskManager<'_>, TaskManagerError> {
         // To update all the tables with the task_id assigned by Sqlite
         // we require row IDs for the tasks table
         // and we use last_insert_rowid() which is not reentrant and needs a transaction lock
@@ -683,22 +684,39 @@ impl TaskDb {
     }
 }
 
+pub struct EnqueTaskParams {
+    pub chain_id: ChainId,
+    pub blockhash: B256,
+    pub proof_system: TaskProofsys,
+    pub submitter: String,
+    pub block_number: BlockNumber,
+    pub parent_hash: B256,
+    pub state_root: B256,
+    pub num_transactions: u64,
+    pub gas_used: u64,
+    pub payload: Vec<u8>,
+}
+
+type TaskProvingStatus = Vec<(Option<String>, TaskStatus, DateTime<Utc>)>;
+
 impl<'db> TaskManager<'db> {
     pub fn enqueue_task(
         &mut self,
-        chain_id: ChainId,
-        blockhash: &B256,
-        proof_system: TaskProofsys,
-        submitter: &str,
-        block_number: BlockNumber,
-        parent_hash: &B256,
-        state_root: &B256,
-        num_transactions: u64,
-        gas_used: u64,
-        payload: &[u8],
+        EnqueTaskParams {
+            chain_id,
+            blockhash,
+            proof_system,
+            submitter,
+            block_number,
+            parent_hash,
+            state_root,
+            num_transactions,
+            gas_used,
+            payload,
+        }: EnqueTaskParams,
     ) -> Result<(), TaskManagerError> {
         self.enqueue_task.execute(named_params! {
-            ":chain_id": chain_id as u64,
+            ":chain_id": chain_id,
             ":blockhash": blockhash.as_slice(),
             ":id_proofsys": proof_system as u8,
             ":submitter": submitter,
@@ -722,7 +740,7 @@ impl<'db> TaskManager<'db> {
         proof: Option<&[u8]>,
     ) -> Result<(), TaskManagerError> {
         self.update_task_progress.execute(named_params! {
-            ":chain_id": chain_id as u64,
+            ":chain_id": chain_id,
             ":blockhash": blockhash.as_slice(),
             ":id_proofsys": proof_system as u8,
             ":fulfiller": fulfiller,
@@ -738,10 +756,10 @@ impl<'db> TaskManager<'db> {
         chain_id: ChainId,
         blockhash: &B256,
         proof_system: TaskProofsys,
-    ) -> Result<Vec<(Option<String>, TaskStatus, DateTime<Utc>)>, TaskManagerError> {
+    ) -> Result<TaskProvingStatus, TaskManagerError> {
         let rows = self.get_task_proving_status.query_map(
             named_params! {
-                ":chain_id": chain_id as u64,
+                ":chain_id": chain_id,
                 ":blockhash": blockhash.as_slice(),
                 ":id_proofsys": proof_system as u8,
             },
@@ -766,7 +784,7 @@ impl<'db> TaskManager<'db> {
     ) -> Result<Vec<u8>, TaskManagerError> {
         let proof = self.get_task_proof.query_row(
             named_params! {
-                ":chain_id": chain_id as u64,
+                ":chain_id": chain_id,
                 ":blockhash": blockhash.as_slice(),
                 ":id_proofsys": proof_system as u8,
             },
diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs
index 50ff0422e..ae53365e4 100644
--- a/task_manager/tests/main.rs
+++ b/task_manager/tests/main.rs
@@ -12,8 +12,8 @@ mod tests {
     use rand::{Rng, SeedableRng};
     use rand_chacha::ChaCha8Rng;
 
-    use raiko_primitives::B256;
-    use task_manager::{TaskDb, TaskProofsys, TaskStatus};
+    use raiko_lib::primitives::B256;
+    use task_manager::{EnqueTaskParams, TaskDb, TaskProofsys, TaskStatus};
 
     #[test]
     fn test_enqueue_task() {
@@ -38,8 +38,8 @@ mod tests {
 
         let chain_id = 100;
         let blockhash = B256::random();
-        let proofsys = TaskProofsys::Risc0;
-        let submitter = "test_enqueue_task";
+        let proof_system = TaskProofsys::Risc0;
+        let submitter = "test_enqueue_task".to_owned();
         let block_number = rng.gen_range(1..4_000_000);
         let parent_hash = B256::random();
         let state_root = B256::random();
@@ -48,18 +48,18 @@ mod tests {
         let payload_length = rng.gen_range(20..200);
         let payload: Vec<u8> = (&mut rng).gen_iter::<u8>().take(payload_length).collect();
 
-        tama.enqueue_task(
+        tama.enqueue_task(EnqueTaskParams {
             chain_id,
-            &blockhash,
-            proofsys,
+            blockhash,
+            proof_system,
             submitter,
             block_number,
-            &parent_hash,
-            &state_root,
+            parent_hash,
+            state_root,
             num_transactions,
             gas_used,
-            &payload,
-        )
+            payload,
+        })
         .unwrap();
     }
 
@@ -87,7 +87,7 @@ mod tests {
         for _ in 0..42 {
             let chain_id = 100;
             let blockhash = B256::random();
-            let proofsys = TaskProofsys::Risc0;
+            let proof_system = TaskProofsys::Risc0;
             let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10));
             let block_number = rng.gen_range(1..4_000_000);
             let parent_hash = B256::random();
@@ -97,18 +97,18 @@ mod tests {
             let payload_length = rng.gen_range(1_000_000..10_000_000);
             let payload: Vec<u8> = (&mut rng).gen_iter::<u8>().take(payload_length).collect();
 
-            tama.enqueue_task(
+            tama.enqueue_task(EnqueTaskParams {
                 chain_id,
-                &blockhash,
-                proofsys,
-                &submitter,
+                blockhash,
+                proof_system,
+                submitter,
                 block_number,
-                &parent_hash,
-                &state_root,
+                parent_hash,
+                state_root,
                 num_transactions,
                 gas_used,
-                &payload,
-            )
+                payload,
+            })
             .unwrap();
         }
 
@@ -144,7 +144,7 @@ mod tests {
         for _ in 0..5 {
            let chain_id = 100;
            let blockhash = B256::random();
-           let proofsys = TaskProofsys::Risc0;
+           let proof_system = TaskProofsys::Risc0;
            let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10));
            let block_number = rng.gen_range(1..4_000_000);
            let parent_hash = B256::random();
@@ -154,28 +154,28 @@ mod tests {
            let payload_length = rng.gen_range(16..64);
            let payload: Vec<u8> = (&mut rng).gen_iter::<u8>().take(payload_length).collect();
 
-           tama.enqueue_task(
+           tama.enqueue_task(EnqueTaskParams {
                chain_id,
-               &blockhash,
-               proofsys,
-               &submitter,
+               blockhash,
+               proof_system,
+               submitter: submitter.clone(),
                block_number,
-               &parent_hash,
-               &state_root,
+               parent_hash,
+               state_root,
                num_transactions,
                gas_used,
-               &payload,
-           )
+               payload,
+           })
            .unwrap();
 
            let task_status = tama
-               .get_task_proving_status(chain_id, &blockhash, proofsys)
+               .get_task_proving_status(chain_id, &blockhash, proof_system)
                .unwrap();
            assert_eq!(task_status.len(), 1);
            assert_eq!(task_status[0].0, Some(submitter.clone()));
            assert_eq!(task_status[0].1, TaskStatus::Registered);
 
-           tasks.push((chain_id, blockhash, proofsys, submitter));
+           tasks.push((chain_id, blockhash, proof_system, submitter));
        }
 
        std::thread::sleep(Duration::from_millis(1));
From 9390f0b55c3dd0a310329c0105eb0c4e69f9e7a3 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Thu, 13 Jun 2024 13:28:14 +0200
Subject: [PATCH 14/44] feat(task-manager): use result type with thiserror

---
 Cargo.lock              |  1 +
 task_manager/Cargo.toml |  7 ++++---
 task_manager/src/lib.rs | 28 ++++++++++++++++------------
 3 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 0f07f9dfd..91d089033 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7074,6 +7074,7 @@ dependencies = [
  "rand_chacha 0.9.0-alpha.1",
  "rusqlite",
  "tempfile",
+ "thiserror",
 ]

diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml
index 2d4bf0a68..212970729 100644
--- a/task_manager/Cargo.toml
+++ b/task_manager/Cargo.toml
@@ -5,10 +5,11 @@ authors = ["Mamy Ratsimbazafy "]
 edition = "2021" # { workspace = true }
 
 [dependencies]
-raiko-lib = { workspace = true }
+raiko-lib.workspace = true
 rusqlite = { workspace = true, features = ["chrono"] }
-num_enum = { workspace = true }
-chrono = { workspace = true }
+num_enum.workspace = true
+chrono.workspace = true
+thiserror.workspace = true
 
 [dev-dependencies]
 rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::<u8>()
diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs
index 18f90855b..5a33e6638 100644
--- a/task_manager/src/lib.rs
+++ b/task_manager/src/lib.rs
@@ -169,12 +169,16 @@ use num_enum::{FromPrimitive, IntoPrimitive};
 // Types
 // ----------------------------------------------------------------
 
-#[derive(PartialEq, Debug)]
+#[derive(PartialEq, Debug, thiserror::Error)]
 pub enum TaskManagerError {
+    #[error("IO Error {0}")]
     IOError(IOErrorKind),
+    #[error("SQL Error {0}")]
     SqlError(String),
 }
 
+pub type TaskManagerResult<T> = Result<T, TaskManagerError>;
+
 impl From<IOError> for TaskManagerError {
     fn from(error: IOError) -> TaskManagerError {
         TaskManagerError::IOError(error.kind())
@@ -235,7 +239,7 @@ pub enum TaskStatus {
 // ----------------------------------------------------------------
 
 impl TaskDb {
-    fn open(path: &Path) -> Result<Connection, TaskManagerError> {
+    fn open(path: &Path) -> TaskManagerResult<Connection> {
         let conn = Connection::open_with_flags(path, OpenFlags::SQLITE_OPEN_READ_WRITE)?;
         conn.pragma_update(None, "foreign_keys", true)?;
         conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?;
@@ -245,7 +249,7 @@ impl TaskDb {
         Ok(conn)
     }
 
-    fn create(path: &Path) -> Result<Connection, TaskManagerError> {
+    fn create(path: &Path) -> TaskManagerResult<Connection> {
         let _file = File::options()
             .write(true)
             .read(true)
@@ -261,7 +265,7 @@ impl TaskDb {
 
     /// Open an existing TaskDb database at "path"
     /// If a database does not exist at the path, one is created.
-    pub fn open_or_create(path: &Path) -> Result<TaskDb, TaskManagerError> {
+    pub fn open_or_create(path: &Path) -> TaskManagerResult<TaskDb> {
         let conn = if path.exists() {
             Self::open(path)
         } else {
@@ -273,7 +277,7 @@ impl TaskDb {
     // SQL
     // ----------------------------------------------------------------
 
-    fn create_tables(conn: &Connection) -> Result<(), TaskManagerError> {
+    fn create_tables(conn: &Connection) -> TaskManagerResult<()> {
         // Change the task_db_version if backward compatibility is broken
         // and introduce a migration on DB opening ... if conserving history is important.
         conn.execute_batch(
@@ -396,7 +400,7 @@ impl TaskDb {
         Ok(())
     }
 
-    fn create_views(conn: &Connection) -> Result<(), TaskManagerError> {
+    fn create_views(conn: &Connection) -> TaskManagerResult<()> {
         // By convention, views will use an action verb as name.
         conn.execute_batch(
             r#"
@@ -458,7 +462,7 @@ impl TaskDb {
         self.conn.trace(trace_fn);
     }
 
-    pub fn manage(&self) -> Result<TaskManager<'_>, TaskManagerError> {
+    pub fn manage(&self) -> TaskManagerResult<TaskManager<'_>> {
         // To update all the tables with the task_id assigned by Sqlite
         // we require row IDs for the tasks table
         // and we use last_insert_rowid() which is not reentrant and needs a transaction lock
@@ -714,7 +718,7 @@ impl<'db> TaskManager<'db> {
             gas_used,
             payload,
         }: EnqueTaskParams,
-    ) -> Result<(), TaskManagerError> {
+    ) -> TaskManagerResult<()> {
         self.enqueue_task.execute(named_params! {
             ":chain_id": chain_id,
             ":blockhash": blockhash.as_slice(),
@@ -738,7 +742,7 @@ impl<'db> TaskManager<'db> {
         fulfiller: Option<&str>,
         status: TaskStatus,
         proof: Option<&[u8]>,
-    ) -> Result<(), TaskManagerError> {
+    ) -> TaskManagerResult<()> {
         self.update_task_progress.execute(named_params! {
             ":chain_id": chain_id,
             ":blockhash": blockhash.as_slice(),
@@ -756,7 +760,7 @@ impl<'db> TaskManager<'db> {
         chain_id: ChainId,
         blockhash: &B256,
         proof_system: TaskProofsys,
-    ) -> Result<TaskProvingStatus, TaskManagerError> {
+    ) -> TaskManagerResult<TaskProvingStatus> {
         let rows = self.get_task_proving_status.query_map(
             named_params! {
                 ":chain_id": chain_id,
@@ -781,7 +785,7 @@ impl<'db> TaskManager<'db> {
         chain_id: ChainId,
         blockhash: &B256,
         proof_system: TaskProofsys,
-    ) -> Result<Vec<u8>, TaskManagerError> {
+    ) -> TaskManagerResult<Vec<u8>> {
         let proof = self.get_task_proof.query_row(
             named_params! {
                 ":chain_id": chain_id,
@@ -795,7 +799,7 @@ impl<'db> TaskManager<'db> {
     }
 
     /// Returns the total and detailed database size
-    pub fn get_db_size(&mut self) -> Result<(usize, Vec<(String, usize)>), TaskManagerError> {
+    pub fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> {
         let rows = self
             .get_db_size
             .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?;

From f1f492d857d5046e78bb943763c69901b57dbb50 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Thu, 13 Jun 2024 15:41:11 +0200
Subject: [PATCH 15/44] chore(task-db): fix typos

---
 task_manager/src/lib.rs    | 25 ++++++++++++-------------
 task_manager/tests/main.rs |  8 ++++----
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs
index 5a33e6638..af267980a 100644
--- a/task_manager/src/lib.rs
+++ b/task_manager/src/lib.rs
@@ -152,19 +152,18 @@
 // Imports
 // ----------------------------------------------------------------
 
-use rusqlite::Error as SqlError;
-use std::io::{Error as IOError, ErrorKind as IOErrorKind};
-
-use std::fs::File;
-use std::path::Path;
-
-use raiko_lib::primitives::{BlockNumber, ChainId, B256};
-
-use rusqlite::{named_params, Statement};
-use rusqlite::{Connection, OpenFlags};
+use std::{
+    fs::File,
+    io::{Error as IOError, ErrorKind as IOErrorKind},
+    path::Path,
+};
 
 use chrono::{DateTime, Utc};
 use num_enum::{FromPrimitive, IntoPrimitive};
+use raiko_lib::primitives::{BlockNumber, ChainId, B256};
+use rusqlite::{
+    Error as SqlError, {named_params, Statement}, {Connection, OpenFlags},
+};
 
 // Types
@@ -688,7 +687,7 @@ impl TaskDb {
     }
 }
 
-pub struct EnqueTaskParams {
+pub struct EnqueueTaskParams {
     pub chain_id: ChainId,
     pub blockhash: B256,
     pub proof_system: TaskProofsys,
@@ -706,7 +705,7 @@ type TaskProvingStatus = Vec<(Option<String>, TaskStatus, DateTime<Utc>)>;
 impl<'db> TaskManager<'db> {
     pub fn enqueue_task(
         &mut self,
-        EnqueTaskParams {
+        EnqueueTaskParams {
             chain_id,
             blockhash,
             proof_system,
@@ -717,7 +716,7 @@
             num_transactions,
             gas_used,
             payload,
-        }:
EnqueTaskParams, + }: EnqueueTaskParams, ) -> TaskManagerResult<()> { self.enqueue_task.execute(named_params! { ":chain_id": chain_id, diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index ae53365e4..64b817273 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -13,7 +13,7 @@ mod tests { use rand_chacha::ChaCha8Rng; use raiko_lib::primitives::B256; - use task_manager::{EnqueTaskParams, TaskDb, TaskProofsys, TaskStatus}; + use task_manager::{EnqueueTaskParams, TaskDb, TaskProofsys, TaskStatus}; #[test] fn test_enqueue_task() { @@ -48,7 +48,7 @@ mod tests { let payload_length = rng.gen_range(20..200); let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); - tama.enqueue_task(EnqueTaskParams { + tama.enqueue_task(EnqueueTaskParams { chain_id, blockhash, proof_system, @@ -97,7 +97,7 @@ mod tests { let payload_length = rng.gen_range(1_000_000..10_000_000); let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); - tama.enqueue_task(EnqueTaskParams { + tama.enqueue_task(EnqueueTaskParams { chain_id, blockhash, proof_system, @@ -154,7 +154,7 @@ mod tests { let payload_length = rng.gen_range(16..64); let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); - tama.enqueue_task(EnqueTaskParams { + tama.enqueue_task(EnqueueTaskParams { chain_id, blockhash, proof_system, From fed8e82f30e2ba1b6945038d1752c7a207a96bfe Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Fri, 14 Jun 2024 10:17:04 +0200 Subject: [PATCH 16/44] refactor(task-manager): clean up tests --- task_manager/src/lib.rs | 1 + task_manager/tests/main.rs | 115 +++++++++++++------------------------ 2 files changed, 40 insertions(+), 76 deletions(-) diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index af267980a..e7a144f59 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -687,6 +687,7 @@ impl TaskDb { } } +#[derive(Debug, Clone)] pub struct EnqueueTaskParams { pub chain_id: ChainId, pub blockhash: B256, diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 64b817273..626d64b6f 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -15,40 +15,21 @@ mod tests { use raiko_lib::primitives::B256; use task_manager::{EnqueueTaskParams, TaskDb, TaskProofsys, TaskStatus}; - #[test] - fn test_enqueue_task() { - // // Materialized local DB - // let dir = std::env::current_dir().unwrap().join("tests"); - // let file = dir.as_path().join("test_enqueue_task.sqlite"); - // if file.exists() { - // std::fs::remove_file(&file).unwrap() - // }; - - // temp dir DB - use tempfile::tempdir; - let dir = tempdir().unwrap(); - let file = dir.path().join("test_enqueue_task.sqlite"); - - #[allow(unused_mut)] - let mut db = TaskDb::open_or_create(&file).unwrap(); - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut tama = db.manage().unwrap(); - + fn create_random_task(submitter: String) -> EnqueueTaskParams { let mut rng = ChaCha8Rng::seed_from_u64(123); let chain_id = 100; let blockhash = B256::random(); let proof_system = TaskProofsys::Risc0; - let submitter = "test_enqueue_task".to_owned(); let block_number = rng.gen_range(1..4_000_000); let parent_hash = B256::random(); let state_root = B256::random(); let num_transactions = rng.gen_range(0..1000); let gas_used = rng.gen_range(0..100_000_000); - let payload_length = rng.gen_range(20..200); + let payload_length = rng.gen_range(1_000_000..10_000_000); let payload: Vec = (&mut 
rng).gen_iter::().take(payload_length).collect(); - tama.enqueue_task(EnqueueTaskParams { + EnqueueTaskParams { chain_id, blockhash, proof_system, @@ -59,8 +40,30 @@ mod tests { num_transactions, gas_used, payload, - }) - .unwrap(); + } + } + + #[test] + fn test_enqueue_task() { + // // Materialized local DB + // let dir = std::env::current_dir().unwrap().join("tests"); + // let file = dir.as_path().join("test_enqueue_task.sqlite"); + // if file.exists() { + // std::fs::remove_file(&file).unwrap() + // }; + + // temp dir DB + use tempfile::tempdir; + let dir = tempdir().unwrap(); + let file = dir.path().join("test_enqueue_task.sqlite"); + + #[allow(unused_mut)] + let mut db = TaskDb::open_or_create(&file).unwrap(); + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut tama = db.manage().unwrap(); + + tama.enqueue_task(create_random_task("test_enqueue_task".to_owned())) + .unwrap(); } #[test] @@ -85,31 +88,9 @@ mod tests { let mut rng = ChaCha8Rng::seed_from_u64(123); for _ in 0..42 { - let chain_id = 100; - let blockhash = B256::random(); - let proof_system = TaskProofsys::Risc0; let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10)); - let block_number = rng.gen_range(1..4_000_000); - let parent_hash = B256::random(); - let state_root = B256::random(); - let num_transactions = rng.gen_range(0..1000); - let gas_used = rng.gen_range(0..100_000_000); - let payload_length = rng.gen_range(1_000_000..10_000_000); - let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); - - tama.enqueue_task(EnqueueTaskParams { - chain_id, - blockhash, - proof_system, - submitter, - block_number, - parent_hash, - state_root, - num_transactions, - gas_used, - payload, - }) - .unwrap(); + + tama.enqueue_task(create_random_task(submitter)).unwrap(); } let (db_size, db_tables_size) = tama.get_db_size().unwrap(); @@ -142,40 +123,22 @@ mod tests { let mut tasks = vec![]; for _ in 0..5 { - let chain_id = 100; - let blockhash = B256::random(); - let proof_system = TaskProofsys::Risc0; let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10)); - let block_number = rng.gen_range(1..4_000_000); - let parent_hash = B256::random(); - let state_root = B256::random(); - let num_transactions = rng.gen_range(0..1000); - let gas_used = rng.gen_range(0..100_000_000); - let payload_length = rng.gen_range(16..64); - let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); - - tama.enqueue_task(EnqueueTaskParams { - chain_id, - blockhash, - proof_system, - submitter: submitter.clone(), - block_number, - parent_hash, - state_root, - num_transactions, - gas_used, - payload, - }) - .unwrap(); + let task = create_random_task(submitter.clone()); + + tama.enqueue_task(task.clone()).unwrap(); let task_status = tama - .get_task_proving_status(chain_id, &blockhash, proof_system) + .get_task_proving_status(task.chain_id, &task.blockhash, task.proof_system) .unwrap(); assert_eq!(task_status.len(), 1); - assert_eq!(task_status[0].0, Some(submitter.clone())); - assert_eq!(task_status[0].1, TaskStatus::Registered); + let (submitter_name, status, _) = task_status + .first() + .expect("Already confirmed there is exactly 1 element"); + assert_eq!(submitter_name, &Some(submitter.clone())); + assert_eq!(status, &TaskStatus::Registered); - tasks.push((chain_id, blockhash, proof_system, submitter)); + tasks.push((task.chain_id, task.blockhash, task.proof_system, submitter)); } std::thread::sleep(Duration::from_millis(1)); From 
544763adafa2129e0c454e27771dede282c5d813 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 17 Jun 2024 10:12:34 +0200 Subject: [PATCH 17/44] fix(docker): unignore task manager --- .dockerignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.dockerignore b/.dockerignore index 0d744e0f2..438e4ad7a 100644 --- a/.dockerignore +++ b/.dockerignore @@ -19,3 +19,4 @@ !/provers/sgx/setup !/kzg_settings_raw.bin !/core +!/task_manager From ee1fb8c855ba332e0fafe0c3a40bb936aee71134 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 17 Jun 2024 11:57:42 +0200 Subject: [PATCH 18/44] [WIP](task_manager): write initial task handler stubs --- Cargo.lock | 31 +++++----- Cargo.toml | 1 + host/Cargo.toml | 3 +- host/src/interfaces.rs | 16 ++++++ host/src/lib.rs | 35 +++++++++++- host/src/server/api/mod.rs | 1 + host/src/server/api/v1/proof.rs | 9 +-- host/src/server/api/v2/mod.rs | 1 + host/src/server/api/v2/proof/get.rs | 0 host/src/server/api/v2/proof/mod.rs | 3 + host/src/server/api/v2/proof/status.rs | 0 host/src/server/api/v2/proof/submit.rs | 79 ++++++++++++++++++++++++++ task_manager/Cargo.toml | 2 +- task_manager/tests/main.rs | 2 +- 14 files changed, 158 insertions(+), 25 deletions(-) create mode 100644 host/src/server/api/v2/mod.rs create mode 100644 host/src/server/api/v2/proof/get.rs create mode 100644 host/src/server/api/v2/proof/mod.rs create mode 100644 host/src/server/api/v2/proof/status.rs create mode 100644 host/src/server/api/v2/proof/submit.rs diff --git a/Cargo.lock b/Cargo.lock index a90090d79..79e4b14e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5293,6 +5293,7 @@ dependencies = [ "proptest", "raiko-core", "raiko-lib", + "raiko-task-manager", "reqwest 0.11.27", "reqwest 0.12.4", "revm", @@ -5425,6 +5426,21 @@ dependencies = [ "url", ] +[[package]] +name = "raiko-task-manager" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "chrono", + "num_enum 0.7.2", + "raiko-lib", + "rand 0.9.0-alpha.1", + "rand_chacha 0.9.0-alpha.1", + "rusqlite", + "tempfile", + "thiserror", +] + [[package]] name = "rand" version = "0.8.5" @@ -7438,21 +7454,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" -[[package]] -name = "task_manager" -version = "0.1.0" -dependencies = [ - "alloy-primitives", - "chrono", - "num_enum 0.7.2", - "raiko-lib", - "rand 0.9.0-alpha.1", - "rand_chacha 0.9.0-alpha.1", - "rusqlite", - "tempfile", - "thiserror", -] - [[package]] name = "tempfile" version = "3.10.1" diff --git a/Cargo.toml b/Cargo.toml index 8dc25a801..10ba72cc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ opt-level = 3 # raiko raiko-lib = { path = "./lib", features = ["std"] } raiko-core = { path = "./core" } +raiko-task-manager = { path = "./task_manager" } # revm revm-primitives = { git = "https://github.com/taikoxyz/revm.git", branch = "v35-taiko", default-features = false } diff --git a/host/Cargo.toml b/host/Cargo.toml index 668c17a67..c051892cc 100644 --- a/host/Cargo.toml +++ b/host/Cargo.toml @@ -13,7 +13,8 @@ sgx-prover = { path = "../provers/sgx/prover", optional = true } # raiko raiko-lib = { workspace = true, features = ["c-kzg"] } -raiko-core = { workspace = true } +raiko-core.workspace = true +raiko-task-manager.workspace = true # alloy alloy-rlp = { workspace = true } diff --git a/host/src/interfaces.rs b/host/src/interfaces.rs index f9d2b9696..b4ec7807a 100644 --- a/host/src/interfaces.rs +++ b/host/src/interfaces.rs @@ -1,11 +1,20 @@ use 
axum::response::IntoResponse; use raiko_core::interfaces::ProofType; use raiko_lib::prover::ProverError; +use raiko_task_manager::TaskManagerError; use utoipa::ToSchema; /// The standardized error returned by the Raiko host. #[derive(thiserror::Error, Debug, ToSchema)] pub enum HostError { + /// For unexpectedly dropping task handle. + #[error("Task handle unexpectedly dropped")] + HandleDropped, + + /// For full prover capacity. + #[error("Capacity full")] + CapacityFull, + /// For invalid address. #[error("Invalid address: {0}")] InvalidAddress(String), @@ -56,6 +65,10 @@ pub enum HostError { #[error("There was an unexpected error: {0}")] #[schema(value_type = Value)] Anyhow(#[from] anyhow::Error), + + /// For task manager errors. + #[error("There was an error with the task manager: {0}")] + TaskManager(#[from] TaskManagerError), } impl IntoResponse for HostError { @@ -74,6 +87,9 @@ impl IntoResponse for HostError { ("feature_not_supported_error".to_string(), t.to_string()) } HostError::Anyhow(e) => ("anyhow_error".to_string(), e.to_string()), + HostError::HandleDropped => ("handle_dropped".to_string(), "".to_string()), + HostError::CapacityFull => ("capacity_full".to_string(), "".to_string()), + HostError::TaskManager(e) => ("task_manager".to_string(), e.to_string()), }; axum::Json(serde_json::json!({ "status": "error", "error": error, "message": message })) .into_response() diff --git a/host/src/lib.rs b/host/src/lib.rs index 4208aec1a..b498519ae 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -16,15 +16,20 @@ pub mod interfaces; pub mod metrics; pub mod server; -use std::{alloc, path::PathBuf}; +use std::{alloc, path::PathBuf, sync::Arc}; use anyhow::Context; use cap::Cap; use clap::Parser; -use raiko_core::{interfaces::ProofRequestOpt, merge}; +use raiko_core::{ + interfaces::{ProofRequest, ProofRequestOpt}, + merge, +}; use raiko_lib::consts::SupportedChainSpecs; +use raiko_task_manager::TaskDb; use serde::{Deserialize, Serialize}; use serde_json::Value; +use tokio::sync::{mpsc, Mutex}; use crate::interfaces::HostResult; @@ -104,6 +109,10 @@ pub struct Cli { #[arg(long, require_equals = true)] /// Set jwt secret for auth jwt_secret: Option, + + #[arg(long, require_equals = true, default_value = "raiko.sqlite")] + /// Set the path to the sqlite db file + sqlite_file: PathBuf, } impl Cli { @@ -124,6 +133,8 @@ impl Cli { pub struct ProverState { pub opts: Cli, pub chain_specs: SupportedChainSpecs, + pub task_db: Arc>, + pub tx: mpsc::Sender<(ProofRequest, Cli)>, } impl ProverState { @@ -146,7 +157,25 @@ impl ProverState { } } - Ok(Self { opts, chain_specs }) + let db = TaskDb::open_or_create(&opts.sqlite_file)?; + let task_db = Arc::new(Mutex::new(db)); + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + + let (tx, mut rx) = mpsc::channel(opts.concurrency_limit); + + tokio::spawn(async move { + while let Some(_proof_request_opt) = rx.recv().await { + // TODO:(petar) implement proof request handler here + todo!(); + } + }); + + Ok(Self { + opts, + chain_specs, + task_db, + tx, + }) } } diff --git a/host/src/server/api/mod.rs b/host/src/server/api/mod.rs index 11e3e394e..8a5d9c56f 100644 --- a/host/src/server/api/mod.rs +++ b/host/src/server/api/mod.rs @@ -17,6 +17,7 @@ use tower_http::{ use crate::ProverState; mod v1; +mod v2; pub fn create_router(concurrency_limit: usize, jwt_secret: Option<&str>) -> Router { let cors = CorsLayer::new() diff --git a/host/src/server/api/v1/proof.rs b/host/src/server/api/v1/proof.rs index 185f677c8..825c0c720 100644 
--- a/host/src/server/api/v1/proof.rs
+++ b/host/src/server/api/v1/proof.rs
@@ -27,7 +27,7 @@ use crate::{
     ProverState,
 };
 
-fn get_cached_input(
+pub fn get_cached_input(
     cache_path: &Option<PathBuf>,
     block_number: u64,
     network: &str,
@@ -41,7 +41,7 @@ fn get_cached_input(
     bincode::deserialize_from(file).ok()
 }
 
-fn set_cached_input(
+pub fn set_cached_input(
     cache_path: &Option<PathBuf>,
     block_number: u64,
     network: &str,
@@ -58,7 +58,7 @@ fn set_cached_input(
     bincode::serialize_into(file, input).map_err(|e| HostError::Anyhow(e.into()))
 }
 
-async fn validate_cache_input(
+pub async fn validate_cache_input(
     cached_input: Option<GuestInput>,
     provider: &RpcBlockDataProvider,
 ) -> HostResult<GuestInput> {
@@ -93,10 +93,11 @@ async fn validate_cache_input(
     }
 }
 
-async fn handle_proof(
+pub async fn handle_proof(
     ProverState {
         opts,
         chain_specs: support_chain_specs,
+        ..
     }: ProverState,
     req: Value,
 ) -> HostResult<Proof> {
diff --git a/host/src/server/api/v2/mod.rs b/host/src/server/api/v2/mod.rs
new file mode 100644
index 000000000..5e3cb98b2
--- /dev/null
+++ b/host/src/server/api/v2/mod.rs
@@ -0,0 +1 @@
+mod proof;
diff --git a/host/src/server/api/v2/proof/get.rs b/host/src/server/api/v2/proof/get.rs
new file mode 100644
index 000000000..e69de29bb
diff --git a/host/src/server/api/v2/proof/mod.rs b/host/src/server/api/v2/proof/mod.rs
new file mode 100644
index 000000000..cfb3c1edf
--- /dev/null
+++ b/host/src/server/api/v2/proof/mod.rs
@@ -0,0 +1,3 @@
+mod get;
+mod status;
+mod submit;
diff --git a/host/src/server/api/v2/proof/status.rs b/host/src/server/api/v2/proof/status.rs
new file mode 100644
index 000000000..e69de29bb
diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof/submit.rs
new file mode 100644
index 000000000..f997f65c1
--- /dev/null
+++ b/host/src/server/api/v2/proof/submit.rs
@@ -0,0 +1,79 @@
+use axum::{debug_handler, extract::State, routing::post, Json, Router};
+use raiko_core::interfaces::ProofRequest;
+use serde_json::Value;
+use tracing::info;
+use utoipa::OpenApi;
+
+use crate::{
+    interfaces::{HostError, HostResult},
+    metrics::{inc_current_req, inc_guest_req_count, inc_host_req_count},
+    ProverState,
+};
+
+#[utoipa::path(post, path = "/proof/submit",
+    tag = "Proving",
+    request_body = ProofRequestOpt,
+    responses (
+        (status = 200, description = "Successfully submitted proof task", body = Status)
+    )
+)]
+#[debug_handler(state = ProverState)]
+/// Submit a proof task with requested config.
+///
+/// Accepts a proof request and creates a proving task with the specified guest prover.
+/// The guest provers currently available are:
+/// - native - constructs a block and checks for equality
+/// - sgx - uses the sgx environment to construct a block and produce proof of execution
+/// - sp1 - uses the sp1 prover
+/// - risc0 - uses the risc0 prover
+async fn submit_handler(
+    State(prover_state): State<ProverState>,
+    Json(req): Json<Value>,
+) -> HostResult<Json<Value>> {
+    inc_current_req();
+    // Override the existing proof request config from the config file and command line
+    // options with the request from the client.
+    let mut config = prover_state.opts.proof_request_opt.clone();
+    config.merge(&req)?;
+
+    // Construct the actual proof request from the available configs.
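+    // At this point `config` holds the server defaults from file/CLI with the
+    // client's overrides merged on top; `try_from` is the validation step and
+    // fails if a required field is still missing after the merge.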
+ let proof_request = ProofRequest::try_from(config)?; + inc_host_req_count(proof_request.block_number); + inc_guest_req_count(&proof_request.proof_type, proof_request.block_number); + + info!( + "# Generating proof for block {} on {}", + proof_request.block_number, proof_request.network + ); + prover_state + .tx + .try_send((proof_request, prover_state.opts)) + .map_err(|e| match e { + tokio::sync::mpsc::error::TrySendError::Full(_) => HostError::CapacityFull, + tokio::sync::mpsc::error::TrySendError::Closed(_) => HostError::HandleDropped, + })?; + let task_db = prover_state.task_db.lock().await; + let mut manager = task_db.manage()?; + #[allow(unreachable_code)] + manager.enqueue_task( + // TODO:(petar) implement task details here + todo!(), + )?; + Ok(Json(serde_json::json!("{}"))) + // handle_proof(prover_state, req).await.map_err(|e| { + // dec_current_req(); + // e + // }) +} + +#[derive(OpenApi)] +#[openapi(paths(submit_handler))] +struct Docs; + +pub fn create_docs() -> utoipa::openapi::OpenApi { + Docs::openapi() +} + +pub fn create_router() -> Router { + Router::new().route("/", post(submit_handler)) +} diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index 212970729..69912ab82 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "task_manager" +name = "raiko-task-manager" version = "0.1.0" authors = ["Mamy Ratsimbazafy "] edition = "2021" # { workspace = true } diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 626d64b6f..8ac4823f5 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -13,7 +13,7 @@ mod tests { use rand_chacha::ChaCha8Rng; use raiko_lib::primitives::B256; - use task_manager::{EnqueueTaskParams, TaskDb, TaskProofsys, TaskStatus}; + use raiko_task_manager::{EnqueueTaskParams, TaskDb, TaskProofsys, TaskStatus}; fn create_random_task(submitter: String) -> EnqueueTaskParams { let mut rng = ChaCha8Rng::seed_from_u64(123); From 26f6010ecdfb8fc16702092de53ee88d09e4ccd0 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 17 Jun 2024 12:12:15 +0200 Subject: [PATCH 19/44] chore(task_manager): run cargo fmt --- host/src/server/api/v2/proof/get.rs | 1 + host/src/server/api/v2/proof/status.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/host/src/server/api/v2/proof/get.rs b/host/src/server/api/v2/proof/get.rs index e69de29bb..8b1378917 100644 --- a/host/src/server/api/v2/proof/get.rs +++ b/host/src/server/api/v2/proof/get.rs @@ -0,0 +1 @@ + diff --git a/host/src/server/api/v2/proof/status.rs b/host/src/server/api/v2/proof/status.rs index e69de29bb..8b1378917 100644 --- a/host/src/server/api/v2/proof/status.rs +++ b/host/src/server/api/v2/proof/status.rs @@ -0,0 +1 @@ + From e127031a5b8d14969b813385f35a5f3ff2817030 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 17 Jun 2024 13:56:12 +0200 Subject: [PATCH 20/44] [WIP](task_manager): write status and proof get handlers --- Cargo.lock | 1 + host/src/server/api/mod.rs | 6 ++- host/src/server/api/v1/mod.rs | 6 +-- host/src/server/api/v2/mod.rs | 71 ++++++++++++++++++++++++ host/src/server/api/v2/proof/get.rs | 41 ++++++++++++++ host/src/server/api/v2/proof/mod.rs | 21 ++++++++ host/src/server/api/v2/proof/status.rs | 42 +++++++++++++++ host/src/server/api/v2/proof/submit.rs | 2 +- task_manager/Cargo.toml | 3 +- task_manager/src/lib.rs | 75 +++++++++++++++++++++++++- 10 files changed, 259 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 79e4b14e7..4e7d96f4e 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -5437,6 +5437,7 @@ dependencies = [ "rand 0.9.0-alpha.1", "rand_chacha 0.9.0-alpha.1", "rusqlite", + "serde", "tempfile", "thiserror", ] diff --git a/host/src/server/api/mod.rs b/host/src/server/api/mod.rs index 8a5d9c56f..930f988cf 100644 --- a/host/src/server/api/mod.rs +++ b/host/src/server/api/mod.rs @@ -36,10 +36,12 @@ pub fn create_router(concurrency_limit: usize, jwt_secret: Option<&str>) -> Rout let trace = TraceLayer::new_for_http(); let v1_api = v1::create_router(concurrency_limit); + let v2_api = v2::create_router(concurrency_limit); let router = Router::new() - .nest("/v1", v1_api.clone()) - .merge(v1_api) + .nest("/v1", v1_api) + .nest("/v2", v2_api.clone()) + .merge(v2_api) .layer(middleware) .layer(middleware::from_fn(check_max_body_size)) .layer(trace) diff --git a/host/src/server/api/v1/mod.rs b/host/src/server/api/v1/mod.rs index 3977e49c8..e621c35b8 100644 --- a/host/src/server/api/v1/mod.rs +++ b/host/src/server/api/v1/mod.rs @@ -9,9 +9,9 @@ use utoipa_swagger_ui::SwaggerUi; use crate::{interfaces::HostError, ProverState}; -mod health; -mod metrics; -mod proof; +pub mod health; +pub mod metrics; +pub mod proof; #[derive(OpenApi)] #[openapi( diff --git a/host/src/server/api/v2/mod.rs b/host/src/server/api/v2/mod.rs index 5e3cb98b2..f378e9263 100644 --- a/host/src/server/api/v2/mod.rs +++ b/host/src/server/api/v2/mod.rs @@ -1 +1,72 @@ mod proof; +use axum::Router; +use utoipa::OpenApi; +use utoipa_scalar::{Scalar, Servable}; +use utoipa_swagger_ui::SwaggerUi; + +use crate::{ + server::api::v1::{self, GuestOutputDoc, ProofResponse, Status}, + ProverState, +}; + +#[derive(OpenApi)] +#[openapi( + info( + title = "Raiko Proverd Server API", + version = "2.0", + description = "Raiko Proverd Server API", + contact( + name = "API Support", + url = "https://community.taiko.xyz", + email = "info@taiko.xyz", + ), + license( + name = "MIT", + url = "https://github.com/taikoxyz/raiko/blob/taiko/unstable/LICENSE" + ), + ), + components( + schemas( + raiko_core::interfaces::ProofRequestOpt, + raiko_core::interfaces::ProverSpecificOpts, + crate::interfaces::HostError, + GuestOutputDoc, + ProofResponse, + Status, + ) + ), + tags( + (name = "Proving", description = "Routes that handle proving requests"), + (name = "Health", description = "Routes that report the server health status"), + (name = "Metrics", description = "Routes that give detailed insight into the server") + ) +)] +/// The root API struct which is generated from the `OpenApi` derive macro. +pub struct Docs; + +#[must_use] +pub fn create_docs() -> utoipa::openapi::OpenApi { + [ + v1::health::create_docs(), + v1::metrics::create_docs(), + proof::create_docs(), + ] + .into_iter() + .fold(Docs::openapi(), |mut doc, sub_doc| { + doc.merge(sub_doc); + doc + }) +} + +pub fn create_router(concurrency_limit: usize) -> Router { + let docs = create_docs(); + + Router::new() + // Only add the concurrency limit to the proof route. We want to still be able to call + // healthchecks and metrics to have insight into the system. 
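+        // (Note: the `concurrency_limit` argument is accepted here but not yet
+        // applied to any v2 route; attaching e.g. `tower::limit::ConcurrencyLimitLayer`
+        // to the proof sub-router would be one way to enforce it.)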
+ .nest("/proof", proof::create_router()) + .nest("/health", v1::health::create_router()) + .nest("/metrics", v1::metrics::create_router()) + .merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", docs.clone())) + .merge(Scalar::with_url("/scalar", docs)) +} diff --git a/host/src/server/api/v2/proof/get.rs b/host/src/server/api/v2/proof/get.rs index 8b1378917..1ee898a6e 100644 --- a/host/src/server/api/v2/proof/get.rs +++ b/host/src/server/api/v2/proof/get.rs @@ -1 +1,42 @@ +use axum::{ + debug_handler, + extract::{Path, State}, + routing::get, + Json, Router, +}; +use utoipa::OpenApi; +use crate::{interfaces::HostResult, ProverState}; + +#[utoipa::path(get, path = "/proof/:task_id", + tag = "Proving", + request_body = ProofRequestOpt, + responses ( + (status = 200, description = "Successfully retrieved a proof", body = Status) + ) +)] +#[debug_handler(state = ProverState)] +/// Get proof for given task id. +/// +/// Accepts a proving task id. +async fn get_handler( + State(prover_state): State, + Path(task_id): Path, +) -> HostResult>> { + let task_db = prover_state.task_db.lock().await; + let mut manager = task_db.manage()?; + let status = manager.get_task_proof_by_id(task_id)?; + Ok(Json(status)) +} + +#[derive(OpenApi)] +#[openapi(paths(get_handler))] +struct Docs; + +pub fn create_docs() -> utoipa::openapi::OpenApi { + Docs::openapi() +} + +pub fn create_router() -> Router { + Router::new().route("/:task_id", get(get_handler)) +} diff --git a/host/src/server/api/v2/proof/mod.rs b/host/src/server/api/v2/proof/mod.rs index cfb3c1edf..fdf822615 100644 --- a/host/src/server/api/v2/proof/mod.rs +++ b/host/src/server/api/v2/proof/mod.rs @@ -1,3 +1,24 @@ +use axum::Router; +use utoipa::openapi; + +use crate::ProverState; + mod get; mod status; mod submit; + +pub fn create_docs() -> openapi::OpenApi { + [status::create_docs(), submit::create_docs()] + .into_iter() + .fold(get::create_docs(), |mut doc, sub_doc| { + doc.merge(sub_doc); + doc + }) +} + +pub fn create_router() -> Router { + Router::new() + .nest("", get::create_router()) + .nest("", status::create_router()) + .nest("", submit::create_router()) +} diff --git a/host/src/server/api/v2/proof/status.rs b/host/src/server/api/v2/proof/status.rs index 8b1378917..a8320a954 100644 --- a/host/src/server/api/v2/proof/status.rs +++ b/host/src/server/api/v2/proof/status.rs @@ -1 +1,43 @@ +use axum::{ + debug_handler, + extract::{Path, State}, + routing::get, + Json, Router, +}; +use raiko_task_manager::TaskProvingStatus; +use utoipa::OpenApi; +use crate::{interfaces::HostResult, ProverState}; + +#[utoipa::path(get, path = "/proof/status/:task_id", + tag = "Proving", + request_body = ProofRequestOpt, + responses ( + (status = 200, description = "Successfully retrieved proving task status", body = Status) + ) +)] +#[debug_handler(state = ProverState)] +/// Check for a proving task status. +/// +/// Accepts a proving task id. 
+async fn status_handler(
+    State(prover_state): State<ProverState>,
+    Path(task_id): Path<u64>,
+) -> HostResult<Json<TaskProvingStatus>> {
+    let task_db = prover_state.task_db.lock().await;
+    let mut manager = task_db.manage()?;
+    let status = manager.get_task_proving_status_by_id(task_id)?;
+    Ok(Json(status))
+}
+
+#[derive(OpenApi)]
+#[openapi(paths(status_handler))]
+struct Docs;
+
+pub fn create_docs() -> utoipa::openapi::OpenApi {
+    Docs::openapi()
+}
+
+pub fn create_router() -> Router<ProverState> {
+    Router::new().route("/status/:task_id", get(status_handler))
+}
diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof/submit.rs
index f997f65c1..0a535505d 100644
--- a/host/src/server/api/v2/proof/submit.rs
+++ b/host/src/server/api/v2/proof/submit.rs
@@ -75,5 +75,5 @@ pub fn create_docs() -> utoipa::openapi::OpenApi {
 }
 
 pub fn create_router() -> Router<ProverState> {
-    Router::new().route("/", post(submit_handler))
+    Router::new().route("/submit", post(submit_handler))
 }
diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml
index 69912ab82..bff2edc5c 100644
--- a/task_manager/Cargo.toml
+++ b/task_manager/Cargo.toml
@@ -8,8 +8,9 @@ edition = "2021" # { workspace = true }
 raiko-lib.workspace = true
 rusqlite = { workspace = true, features = ["chrono"] }
 num_enum.workspace = true
-chrono.workspace = true
+chrono = { workspace = true, features = ["serde"] }
 thiserror.workspace = true
+serde.workspace = true
 
 [dev-dependencies]
 rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::<u8>()
diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs
index e7a144f59..68864e06d 100644
--- a/task_manager/src/lib.rs
+++ b/task_manager/src/lib.rs
@@ -164,6 +164,7 @@ use raiko_lib::primitives::{BlockNumber, ChainId, B256};
 use rusqlite::{
     Error as SqlError, {named_params, Statement}, {Connection, OpenFlags},
 };
+use serde::Serialize;
 
 // Types
 // ----------------------------------------------------------------
@@ -200,7 +201,9 @@ pub struct TaskManager<'db> {
     enqueue_task: Statement<'db>,
     update_task_progress: Statement<'db>,
     get_task_proof: Statement<'db>,
+    get_task_proof_by_id: Statement<'db>,
     get_task_proving_status: Statement<'db>,
+    get_task_proving_status_by_id: Statement<'db>,
     #[allow(dead_code)]
     get_tasks_unfinished: Statement<'db>,
     get_db_size: Statement<'db>,
@@ -215,7 +218,7 @@ pub enum TaskProofsys {
 
 #[allow(non_camel_case_types)]
 #[rustfmt::skip]
-#[derive(PartialEq, Debug, Copy, Clone, IntoPrimitive, FromPrimitive)]
+#[derive(PartialEq, Debug, Copy, Clone, IntoPrimitive, FromPrimitive, Serialize)]
 #[repr(i32)]
 pub enum TaskStatus {
     Success = 0,
@@ -625,6 +628,18 @@ impl TaskDb {
             ",
         )?;
 
+        let get_task_proof_by_id = conn.prepare(
+            "
+            SELECT proof
+            FROM task_proofs tp
+            LEFT JOIN
+                tasks t ON tp.id_task = t.id_task
+            WHERE 1=1
+                AND t.id_task = :task_id
+            LIMIT 1;
+            ",
+        )?;
+
         let get_task_proving_status = conn.prepare(
             "
             SELECT
@@ -648,6 +663,27 @@ impl TaskDb {
             ",
         )?;
 
+        let get_task_proving_status_by_id = conn.prepare(
+            "
+            SELECT
+                t3p.thirdparty_desc,
+                ts.id_status,
+                MAX(timestamp)
+            FROM
+                task_status ts
+                LEFT JOIN
+                    tasks t ON ts.id_task = t.id_task
+                LEFT JOIN
+                    thirdparties t3p ON ts.id_thirdparty = t3p.id_thirdparty
+            WHERE 1=1
+                AND t.id_task = :task_id
+            GROUP BY
+                t3p.id_thirdparty
+            ORDER BY
+                ts.timestamp DESC;
+            ",
+        )?;
+
         let get_tasks_unfinished = conn.prepare(
             "
             SELECT
@@ -680,7 +716,9 @@ impl TaskDb {
             enqueue_task,
             update_task_progress,
             get_task_proof,
+            get_task_proof_by_id,
             get_task_proving_status,
+            get_task_proving_status_by_id,
             get_tasks_unfinished,
             get_db_size,
         })
@@ -701,7 +739,7 @@ pub struct EnqueueTaskParams {
     pub payload: Vec<u8>,
 }
 
-type TaskProvingStatus = Vec<(Option<String>, TaskStatus, DateTime<Utc>)>;
+pub type TaskProvingStatus = Vec<(Option<String>, TaskStatus, DateTime<Utc>)>;
 
 impl<'db> TaskManager<'db> {
     pub fn enqueue_task(
@@ -780,6 +818,28 @@ impl<'db> TaskManager<'db> {
         Ok(proving_status)
     }
 
+    /// Returns the latest triplet (submitter or fulfiller, status, last update time)
+    pub fn get_task_proving_status_by_id(
+        &mut self,
+        task_id: u64,
+    ) -> TaskManagerResult<TaskProvingStatus> {
+        let rows = self.get_task_proving_status_by_id.query_map(
+            named_params! {
+                ":task_id": task_id,
+            },
+            |row| {
+                Ok((
+                    row.get::<_, Option<String>>(0)?,
+                    TaskStatus::from(row.get::<_, i32>(1)?),
+                    row.get::<_, DateTime<Utc>>(2)?,
+                ))
+            },
+        )?;
+        let proving_status = rows.collect::<Result<Vec<_>, _>>()?;
+
+        Ok(proving_status)
+    }
+
     pub fn get_task_proof(
         &mut self,
         chain_id: ChainId,
@@ -798,6 +858,17 @@ impl<'db> TaskManager<'db> {
         Ok(proof)
     }
 
+    pub fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult<Vec<u8>> {
+        let proof = self.get_task_proof_by_id.query_row(
+            named_params! {
+                ":task_id": task_id,
+            },
+            |r| r.get(0),
+        )?;
+
+        Ok(proof)
+    }
+
     /// Returns the total and detailed database size
     pub fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> {
         let rows = self

From 4279519fcb0ec688feda811e70284b6a9767f3d1 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Mon, 17 Jun 2024 14:07:27 +0200
Subject: [PATCH 21/44] refactor(host): use merge instead of nest

---
 host/src/server/api/v2/proof/mod.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/host/src/server/api/v2/proof/mod.rs b/host/src/server/api/v2/proof/mod.rs
index fdf822615..4121a4199 100644
--- a/host/src/server/api/v2/proof/mod.rs
+++ b/host/src/server/api/v2/proof/mod.rs
@@ -18,7 +18,7 @@ pub fn create_docs() -> openapi::OpenApi {
 
 pub fn create_router() -> Router<ProverState> {
     Router::new()
-        .nest("", get::create_router())
-        .nest("", status::create_router())
-        .nest("", submit::create_router())
+        .merge(get::create_router())
+        .merge(status::create_router())
+        .merge(submit::create_router())
 }

From 24f23e2573ee13f37455d6041d9d938942fd51a2 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Tue, 18 Jun 2024 11:37:56 +0200
Subject: [PATCH 22/44] chore(format): format workflow files

---
 .github/workflows/ci-native.yml       | 13 ++++++-------
 .github/workflows/ci-risc0.yml        | 15 +++++++--------
 .github/workflows/ci-sgx-all.yml      | 26 +++++++++++++-------------
 .github/workflows/ci-sgx-hardware.yml | 10 +++++-----
 .github/workflows/ci-sp1.yml          | 12 ++++++------
 .github/workflows/openapi-deploy.yml  |  2 +-
 6 files changed, 38 insertions(+), 40 deletions(-)

diff --git a/.github/workflows/ci-native.yml b/.github/workflows/ci-native.yml
index 4621e7226..0c1ab7874 100644
--- a/.github/workflows/ci-native.yml
+++ b/.github/workflows/ci-native.yml
@@ -2,11 +2,10 @@ name: CI - Native
 
 on: workflow_call
 
-
 jobs:
-    build-test-native:
-        name: Build and test native
-        uses: ./.github/workflows/ci-build-test-reusable.yml
-        with:
-            version_name: "native"
-            version_toolchain: "nightly-2024-04-17"
+  build-test-native:
+    name: Build and test native
+    uses: ./.github/workflows/ci-build-test-reusable.yml
+    with:
+      version_name: "native"
+      version_toolchain: "nightly-2024-04-17"
diff --git a/.github/workflows/ci-risc0.yml b/.github/workflows/ci-risc0.yml
index bc0d95f2f..126d106b9 100644
--- a/.github/workflows/ci-risc0.yml
+++ b/.github/workflows/ci-risc0.yml
@@ -1,6 +1,6 @@
 name: CI - RISC0
 
-on: 
+on:
   workflow_call:
   push:
     branches: ["main"]
@@ -10,11 +10,10 @@ on: paths: - "provers/risc0/**" - jobs: - build-test-risc0: - name: Build and test risc0 - uses: ./.github/workflows/ci-build-test-reusable.yml - with: - version_name: "risc0" - version_toolchain: "stable" + build-test-risc0: + name: Build and test risc0 + uses: ./.github/workflows/ci-build-test-reusable.yml + with: + version_name: "risc0" + version_toolchain: "stable" diff --git a/.github/workflows/ci-sgx-all.yml b/.github/workflows/ci-sgx-all.yml index d08512cf8..e8af4b923 100644 --- a/.github/workflows/ci-sgx-all.yml +++ b/.github/workflows/ci-sgx-all.yml @@ -11,17 +11,17 @@ on: - "provers/sgx/**" jobs: - build-test-sgx: - name: Build and test sgx - uses: ./.github/workflows/ci-build-test-reusable.yml - with: - version_name: "sgx" - version_toolchain: "stable" + build-test-sgx: + name: Build and test sgx + uses: ./.github/workflows/ci-build-test-reusable.yml + with: + version_name: "sgx" + version_toolchain: "stable" - build-test-sgx-docker: - name: Build and test sgx with Docker - uses: ./.github/workflows/ci-sgx-docker.yml - - build-test-sgx-hardware: - name: Build and test sgx in hardware - uses: ./.github/workflows/ci-sgx-hardware.yml + build-test-sgx-docker: + name: Build and test sgx with Docker + uses: ./.github/workflows/ci-sgx-docker.yml + + build-test-sgx-hardware: + name: Build and test sgx in hardware + uses: ./.github/workflows/ci-sgx-hardware.yml diff --git a/.github/workflows/ci-sgx-hardware.yml b/.github/workflows/ci-sgx-hardware.yml index 53c648d8b..6efa67ae8 100644 --- a/.github/workflows/ci-sgx-hardware.yml +++ b/.github/workflows/ci-sgx-hardware.yml @@ -11,7 +11,7 @@ jobs: TARGET: sgx CI: 1 EDMM: 0 - + steps: - uses: actions/checkout@v4 with: @@ -21,15 +21,15 @@ jobs: with: toolchain: stable profile: minimal - + - name: Install cargo-binstall uses: cargo-bins/cargo-binstall@v1.6.4 - + - name: Install sgx run: make install - + - name: Build sgx prover run: make build - + - name: Test sgx prover run: make test diff --git a/.github/workflows/ci-sp1.yml b/.github/workflows/ci-sp1.yml index 6c0654b98..85844908b 100644 --- a/.github/workflows/ci-sp1.yml +++ b/.github/workflows/ci-sp1.yml @@ -11,9 +11,9 @@ on: - "provers/sp1/**" jobs: - build-test-sgx: - name: Build and test sp1 - uses: ./.github/workflows/ci-build-test-reusable.yml - with: - version_name: "sp1" - version_toolchain: "nightly-2024-04-18" + build-test-sgx: + name: Build and test sp1 + uses: ./.github/workflows/ci-build-test-reusable.yml + with: + version_name: "sp1" + version_toolchain: "nightly-2024-04-18" diff --git a/.github/workflows/openapi-deploy.yml b/.github/workflows/openapi-deploy.yml index a27375797..f0354dac0 100644 --- a/.github/workflows/openapi-deploy.yml +++ b/.github/workflows/openapi-deploy.yml @@ -43,7 +43,7 @@ jobs: if: github.ref == 'refs/heads/main' uses: actions/upload-pages-artifact@v2 with: - path: './openapi' + path: "./openapi" - name: Deploy to GitHub Pages if: github.ref == 'refs/heads/main' From caa4714a6a616cd0072c6e89a9e4ca0a39e7cdda Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Tue, 18 Jun 2024 13:31:56 +0200 Subject: [PATCH 23/44] chore(deps): use consistent dependency style --- Cargo.lock | 1 + host/Cargo.toml | 4 ++-- task_manager/Cargo.toml | 9 +++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e7d96f4e..eb336e30f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5433,6 +5433,7 @@ dependencies = [ "alloy-primitives", "chrono", "num_enum 0.7.2", + "raiko-core", "raiko-lib", "rand 0.9.0-alpha.1", 
"rand_chacha 0.9.0-alpha.1", diff --git a/host/Cargo.toml b/host/Cargo.toml index c051892cc..939538622 100644 --- a/host/Cargo.toml +++ b/host/Cargo.toml @@ -13,8 +13,8 @@ sgx-prover = { path = "../provers/sgx/prover", optional = true } # raiko raiko-lib = { workspace = true, features = ["c-kzg"] } -raiko-core.workspace = true -raiko-task-manager.workspace = true +raiko-core = { workspace = true } +raiko-task-manager = { workspace = true } # alloy alloy-rlp = { workspace = true } diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index bff2edc5c..32d88ed5c 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -5,12 +5,13 @@ authors = ["Mamy Ratsimbazafy "] edition = "2021" # { workspace = true } [dependencies] -raiko-lib.workspace = true +raiko-lib = { workspace = true } +raiko-core = { workspace = true } rusqlite = { workspace = true, features = ["chrono"] } -num_enum.workspace = true +num_enum = { workspace = true } chrono = { workspace = true, features = ["serde"] } -thiserror.workspace = true -serde.workspace = true +thiserror = { workspace = true } +serde = { workspace = true } [dev-dependencies] rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::() From 291a2999dd19a08b6d07c8f4e543f0a94c9fe6d4 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Tue, 18 Jun 2024 13:32:28 +0200 Subject: [PATCH 24/44] chore(host): rename tx to task_channel --- host/src/lib.rs | 4 ++-- host/src/server/api/v2/proof/submit.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/host/src/lib.rs b/host/src/lib.rs index b498519ae..c20f8854b 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -134,7 +134,7 @@ pub struct ProverState { pub opts: Cli, pub chain_specs: SupportedChainSpecs, pub task_db: Arc>, - pub tx: mpsc::Sender<(ProofRequest, Cli)>, + pub task_channel: mpsc::Sender<(ProofRequest, Cli)>, } impl ProverState { @@ -174,7 +174,7 @@ impl ProverState { opts, chain_specs, task_db, - tx, + task_channel: tx, }) } } diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof/submit.rs index 0a535505d..1e8ebadee 100644 --- a/host/src/server/api/v2/proof/submit.rs +++ b/host/src/server/api/v2/proof/submit.rs @@ -46,7 +46,7 @@ async fn submit_handler( proof_request.block_number, proof_request.network ); prover_state - .tx + .task_channel .try_send((proof_request, prover_state.opts)) .map_err(|e| match e { tokio::sync::mpsc::error::TrySendError::Full(_) => HostError::CapacityFull, From 5d41651cb1226ba7ee42d64db4fefd3fbbb52bef Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Tue, 18 Jun 2024 14:20:52 +0200 Subject: [PATCH 25/44] [WIP](task_manager): add initial submit logic --- core/src/preflight.rs | 1 + host/src/interfaces.rs | 10 +++ host/src/lib.rs | 12 +--- host/src/server/api/v2/proof/get.rs | 5 +- host/src/server/api/v2/proof/status.rs | 6 +- host/src/server/api/v2/proof/submit.rs | 88 ++++++++++++++++++++------ lib/src/input.rs | 1 + task_manager/src/lib.rs | 12 ++++ 8 files changed, 103 insertions(+), 32 deletions(-) diff --git a/core/src/preflight.rs b/core/src/preflight.rs index f2ff72abe..55329e841 100644 --- a/core/src/preflight.rs +++ b/core/src/preflight.rs @@ -322,6 +322,7 @@ async fn prepare_taiko_chain_input( block_proposed: proposal_event, prover_data, skip_verify_blob: false, + tx_len: transactions.len() as u64, }) } diff --git a/host/src/interfaces.rs b/host/src/interfaces.rs index b4ec7807a..df620349b 100644 --- a/host/src/interfaces.rs +++ b/host/src/interfaces.rs @@ -2,6 +2,7 @@ use 
axum::response::IntoResponse;
 use raiko_core::interfaces::ProofType;
 use raiko_lib::prover::ProverError;
 use raiko_task_manager::TaskManagerError;
+use tokio::sync::mpsc::error::TrySendError;
 use utoipa::ToSchema;
 
 /// The standardized error returned by the Raiko host.
@@ -96,5 +97,14 @@ impl IntoResponse for HostError {
     }
 }
 
+impl<T> From<TrySendError<T>> for HostError {
+    fn from(value: TrySendError<T>) -> Self {
+        match value {
+            TrySendError::Full(_) => HostError::CapacityFull,
+            TrySendError::Closed(_) => HostError::HandleDropped,
+        }
+    }
+}
+
 /// A type alias for the standardized result type returned by the Raiko host.
 pub type HostResult<T> = axum::response::Result<T, HostError>;
diff --git a/host/src/lib.rs b/host/src/lib.rs
index c20f8854b..88dc13c8e 100644
--- a/host/src/lib.rs
+++ b/host/src/lib.rs
@@ -133,7 +133,6 @@ impl Cli {
 pub struct ProverState {
     pub opts: Cli,
     pub chain_specs: SupportedChainSpecs,
-    pub task_db: Arc<Mutex<TaskDb>>,
     pub task_channel: mpsc::Sender<(ProofRequest, Cli)>,
 }
 
@@ -157,14 +156,10 @@ impl ProverState {
             }
         }
 
-        let db = TaskDb::open_or_create(&opts.sqlite_file)?;
-        let task_db = Arc::new(Mutex::new(db));
-        // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt)));
-
-        let (tx, mut rx) = mpsc::channel(opts.concurrency_limit);
+        let (task_channel, mut receiver) = mpsc::channel(opts.concurrency_limit);
 
         tokio::spawn(async move {
-            while let Some(_proof_request_opt) = rx.recv().await {
+            while let Some(_proof_request_opt) = receiver.recv().await {
                 // TODO:(petar) implement proof request handler here
                 todo!();
             }
@@ -173,8 +168,7 @@ impl ProverState {
         Ok(Self {
             opts,
             chain_specs,
-            task_db,
-            task_channel: tx,
+            task_channel,
         })
     }
 }
diff --git a/host/src/server/api/v2/proof/get.rs b/host/src/server/api/v2/proof/get.rs
index 1ee898a6e..a377d5933 100644
--- a/host/src/server/api/v2/proof/get.rs
+++ b/host/src/server/api/v2/proof/get.rs
@@ -4,6 +4,7 @@ use axum::{
     routing::get,
     Json, Router,
 };
+use raiko_task_manager::TaskDb;
 use utoipa::OpenApi;
 
 use crate::{interfaces::HostResult, ProverState};
@@ -23,8 +24,8 @@ async fn get_handler(
     State(prover_state): State<ProverState>,
     Path(task_id): Path<u64>,
 ) -> HostResult<Json<Vec<u8>>> {
-    let task_db = prover_state.task_db.lock().await;
-    let mut manager = task_db.manage()?;
+    let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?;
+    let mut manager = db.manage()?;
     let status = manager.get_task_proof_by_id(task_id)?;
     Ok(Json(status))
 }
diff --git a/host/src/server/api/v2/proof/status.rs b/host/src/server/api/v2/proof/status.rs
index a8320a954..4ddab7012 100644
--- a/host/src/server/api/v2/proof/status.rs
+++ b/host/src/server/api/v2/proof/status.rs
@@ -4,7 +4,7 @@ use axum::{
     routing::get,
     Json, Router,
 };
-use raiko_task_manager::TaskProvingStatus;
+use raiko_task_manager::{TaskDb, TaskProvingStatus};
 use utoipa::OpenApi;
 
 use crate::{interfaces::HostResult, ProverState};
@@ -24,8 +24,8 @@ async fn status_handler(
     State(prover_state): State<ProverState>,
     Path(task_id): Path<u64>,
 ) -> HostResult<Json<TaskProvingStatus>> {
-    let task_db = prover_state.task_db.lock().await;
-    let mut manager = task_db.manage()?;
+    let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?;
+    let mut manager = db.manage()?;
     let status = manager.get_task_proving_status_by_id(task_id)?;
     Ok(Json(status))
 }
diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof/submit.rs
index 1e8ebadee..3ef8c45fe 100644
--- a/host/src/server/api/v2/proof/submit.rs
+++ b/host/src/server/api/v2/proof/submit.rs
@@ -1,12 +1,18 @@
 use axum::{debug_handler, extract::State, routing::post, Json, Router};
-use 
raiko_core::interfaces::ProofRequest; +use raiko_core::{interfaces::ProofRequest, provider::rpc::RpcBlockDataProvider, Raiko}; +use raiko_lib::Measurement; +use raiko_task_manager::{EnqueueTaskParams, TaskDb}; use serde_json::Value; use tracing::info; use utoipa::OpenApi; use crate::{ interfaces::{HostError, HostResult}, - metrics::{inc_current_req, inc_guest_req_count, inc_host_req_count}, + memory, + metrics::{ + inc_current_req, inc_guest_req_count, inc_host_req_count, observe_prepare_input_time, + }, + server::api::v1::proof::{get_cached_input, validate_cache_input}, ProverState, }; @@ -45,25 +51,71 @@ async fn submit_handler( "# Generating proof for block {} on {}", proof_request.block_number, proof_request.network ); + + let l1_chain_spec = prover_state + .chain_specs + .get_chain_spec(&proof_request.l1_network.to_string()) + .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported l1 network".to_string()))?; + + let taiko_chain_spec = prover_state + .chain_specs + .get_chain_spec(&proof_request.network.to_string()) + .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported raiko network".to_string()))?; + + let cached_input = get_cached_input( + &prover_state.opts.cache_path, + proof_request.block_number, + &proof_request.network.to_string(), + ); + + let raiko = Raiko::new( + l1_chain_spec.clone(), + taiko_chain_spec.clone(), + proof_request.clone(), + ); + + let provider = RpcBlockDataProvider::new( + &taiko_chain_spec.rpc.clone(), + proof_request.block_number - 1, + )?; + + let input = match validate_cache_input(cached_input, &provider).await { + Ok(cache_input) => cache_input, + Err(_) => { + // no valid cache + memory::reset_stats(); + let measurement = Measurement::start("Generating input...", false); + let input = raiko.generate_input(provider).await?; + let input_time = measurement.stop_with("=> Input generated"); + observe_prepare_input_time(proof_request.block_number, input_time, true); + memory::print_stats("Input generation peak memory used: "); + input + } + }; + + let params = EnqueueTaskParams { + chain_id: l1_chain_spec.chain_id, + proof_system: proof_request.proof_type.clone().into(), + block_number: proof_request.block_number, + submitter: proof_request.prover.to_string(), + blockhash: input.block_hash_reference, + parent_hash: input.block_header_reference.parent_hash, + state_root: input.block_header_reference.state_root, + gas_used: input.block_header_reference.gas_used as u64, + payload: input.taiko.tx_data, + num_transactions: input.taiko.tx_len, + }; + + let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?; + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut manager = db.manage()?; + prover_state .task_channel - .try_send((proof_request, prover_state.opts)) - .map_err(|e| match e { - tokio::sync::mpsc::error::TrySendError::Full(_) => HostError::CapacityFull, - tokio::sync::mpsc::error::TrySendError::Closed(_) => HostError::HandleDropped, - })?; - let task_db = prover_state.task_db.lock().await; - let mut manager = task_db.manage()?; - #[allow(unreachable_code)] - manager.enqueue_task( - // TODO:(petar) implement task details here - todo!(), - )?; + .try_send((proof_request, prover_state.opts))?; + + manager.enqueue_task(params)?; Ok(Json(serde_json::json!("{}"))) - // handle_proof(prover_state, req).await.map_err(|e| { - // dec_current_req(); - // e - // }) } #[derive(OpenApi)] diff --git a/lib/src/input.rs b/lib/src/input.rs index d0b30e47f..2fa280249 100644 --- a/lib/src/input.rs +++ 
b/lib/src/input.rs
@@ -96,6 +96,7 @@ pub struct TaikoGuestInput {
     pub prover_data: TaikoProverData,
     pub tx_blob_hash: Option<B256>,
     pub skip_verify_blob: bool,
+    pub tx_len: u64,
 }
 
 #[derive(Clone, Default, Debug, Serialize, Deserialize)]
diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs
index 68864e06d..de4032eea 100644
--- a/task_manager/src/lib.rs
+++ b/task_manager/src/lib.rs
@@ -160,6 +160,7 @@ use std::{
 
 use chrono::{DateTime, Utc};
 use num_enum::{FromPrimitive, IntoPrimitive};
+use raiko_core::interfaces::ProofType;
 use raiko_lib::primitives::{BlockNumber, ChainId, B256};
 use rusqlite::{
     Error as SqlError, {named_params, Statement}, {Connection, OpenFlags},
@@ -216,6 +217,17 @@ pub enum TaskProofsys {
     SGX = 2,
 }
 
+impl From<ProofType> for TaskProofsys {
+    fn from(value: ProofType) -> Self {
+        match value {
+            ProofType::Sp1 => Self::SP1,
+            ProofType::Sgx => Self::SGX,
+            ProofType::Risc0 => Self::Risc0,
+            ProofType::Native => unreachable!(),
+        }
+    }
+}
+
 #[allow(non_camel_case_types)]
 #[rustfmt::skip]
 #[derive(PartialEq, Debug, Copy, Clone, IntoPrimitive, FromPrimitive, Serialize)]

From 5e53b2b3ef0c6d5150f1b4d555828337dbc0a5f1 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Tue, 18 Jun 2024 14:28:50 +0200
Subject: [PATCH 26/44] chore(clippy): remove unused parameter

---
 host/src/server/api/mod.rs    | 2 +-
 host/src/server/api/v2/mod.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/host/src/server/api/mod.rs b/host/src/server/api/mod.rs
index 930f988cf..226ef0a4a 100644
--- a/host/src/server/api/mod.rs
+++ b/host/src/server/api/mod.rs
@@ -36,7 +36,7 @@ pub fn create_router(concurrency_limit: usize, jwt_secret: Option<&str>) -> Rout
     let trace = TraceLayer::new_for_http();
 
     let v1_api = v1::create_router(concurrency_limit);
-    let v2_api = v2::create_router(concurrency_limit);
+    let v2_api = v2::create_router();
 
     let router = Router::new()
         .nest("/v1", v1_api)
diff --git a/host/src/server/api/v2/mod.rs b/host/src/server/api/v2/mod.rs
index f378e9263..993445e1c 100644
--- a/host/src/server/api/v2/mod.rs
+++ b/host/src/server/api/v2/mod.rs
@@ -58,7 +58,7 @@ pub fn create_docs() -> utoipa::openapi::OpenApi {
     })
 }
 
-pub fn create_router(concurrency_limit: usize) -> Router<ProverState> {
+pub fn create_router() -> Router<ProverState> {
     let docs = create_docs();
 
     Router::new()

From 405459e18506f826f1dfccc7b565b5b0abe30aac Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Tue, 18 Jun 2024 14:39:55 +0200
Subject: [PATCH 27/44] chore(clippy): remove unused imports

---
 host/src/lib.rs               | 14 ++++++--------
 host/src/server/api/v2/mod.rs |  3 ++-
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/host/src/lib.rs b/host/src/lib.rs
index 88dc13c8e..5171fe75c 100644
--- a/host/src/lib.rs
+++ b/host/src/lib.rs
@@ -11,12 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
- -pub mod interfaces; -pub mod metrics; -pub mod server; - -use std::{alloc, path::PathBuf, sync::Arc}; +use std::{alloc, path::PathBuf}; use anyhow::Context; use cap::Cap; @@ -26,13 +21,16 @@ use raiko_core::{ merge, }; use raiko_lib::consts::SupportedChainSpecs; -use raiko_task_manager::TaskDb; use serde::{Deserialize, Serialize}; use serde_json::Value; -use tokio::sync::{mpsc, Mutex}; +use tokio::sync::mpsc; use crate::interfaces::HostResult; +pub mod interfaces; +pub mod metrics; +pub mod server; + #[global_allocator] static ALLOCATOR: Cap = Cap::new(alloc::System, usize::MAX); diff --git a/host/src/server/api/v2/mod.rs b/host/src/server/api/v2/mod.rs index 993445e1c..47d9b49ff 100644 --- a/host/src/server/api/v2/mod.rs +++ b/host/src/server/api/v2/mod.rs @@ -1,4 +1,3 @@ -mod proof; use axum::Router; use utoipa::OpenApi; use utoipa_scalar::{Scalar, Servable}; @@ -9,6 +8,8 @@ use crate::{ ProverState, }; +mod proof; + #[derive(OpenApi)] #[openapi( info( From 3e5fc6dceddeae3ccc05f3fd5febda9fddcfa684 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Thu, 20 Jun 2024 15:44:47 +0200 Subject: [PATCH 28/44] refactor(core): add copy trait to proof types --- core/src/interfaces.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/core/src/interfaces.rs b/core/src/interfaces.rs index 466f61b4f..eff8c0b4f 100644 --- a/core/src/interfaces.rs +++ b/core/src/interfaces.rs @@ -73,7 +73,18 @@ impl From for RaikoError { pub type RaikoResult = Result; #[derive( - PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Deserialize, Serialize, ToSchema, Hash, ValueEnum, + PartialEq, + Eq, + PartialOrd, + Ord, + Clone, + Debug, + Deserialize, + Serialize, + ToSchema, + Hash, + ValueEnum, + Copy, )] /// Available proof types. pub enum ProofType { From d4326cbc83fdef9d1a4b198e54a18540e70d8e40 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Thu, 20 Jun 2024 15:45:11 +0200 Subject: [PATCH 29/44] feat(task_manager): simplify db and adapt tests --- Cargo.lock | 1 + host/src/server/api/v2/proof/submit.rs | 64 +-- task_manager/Cargo.toml | 1 + task_manager/src/lib.rs | 629 ++++++++++++------------- task_manager/tests/main.rs | 252 ++++------ 5 files changed, 399 insertions(+), 548 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d54bdcf1..e9a1632c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5343,6 +5343,7 @@ dependencies = [ "rand_chacha 0.9.0-alpha.1", "rusqlite", "serde", + "serde_json", "tempfile", "thiserror", ] diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof/submit.rs index 3ef8c45fe..f7b54410d 100644 --- a/host/src/server/api/v2/proof/submit.rs +++ b/host/src/server/api/v2/proof/submit.rs @@ -1,18 +1,13 @@ use axum::{debug_handler, extract::State, routing::post, Json, Router}; -use raiko_core::{interfaces::ProofRequest, provider::rpc::RpcBlockDataProvider, Raiko}; -use raiko_lib::Measurement; -use raiko_task_manager::{EnqueueTaskParams, TaskDb}; +use raiko_core::interfaces::ProofRequest; +use raiko_task_manager::TaskDb; use serde_json::Value; use tracing::info; use utoipa::OpenApi; use crate::{ interfaces::{HostError, HostResult}, - memory, - metrics::{ - inc_current_req, inc_guest_req_count, inc_host_req_count, observe_prepare_input_time, - }, - server::api::v1::proof::{get_cached_input, validate_cache_input}, + metrics::{inc_current_req, inc_guest_req_count, inc_host_req_count}, ProverState, }; @@ -57,64 +52,15 @@ async fn submit_handler( .get_chain_spec(&proof_request.l1_network.to_string()) .ok_or_else(|| 
HostError::InvalidRequestConfig("Unsupported l1 network".to_string()))?;
 
-    let taiko_chain_spec = prover_state
-        .chain_specs
-        .get_chain_spec(&proof_request.network.to_string())
-        .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported raiko network".to_string()))?;
-
-    let cached_input = get_cached_input(
-        &prover_state.opts.cache_path,
-        proof_request.block_number,
-        &proof_request.network.to_string(),
-    );
-
-    let raiko = Raiko::new(
-        l1_chain_spec.clone(),
-        taiko_chain_spec.clone(),
-        proof_request.clone(),
-    );
-
-    let provider = RpcBlockDataProvider::new(
-        &taiko_chain_spec.rpc.clone(),
-        proof_request.block_number - 1,
-    )?;
-
-    let input = match validate_cache_input(cached_input, &provider).await {
-        Ok(cache_input) => cache_input,
-        Err(_) => {
-            // no valid cache
-            memory::reset_stats();
-            let measurement = Measurement::start("Generating input...", false);
-            let input = raiko.generate_input(provider).await?;
-            let input_time = measurement.stop_with("=> Input generated");
-            observe_prepare_input_time(proof_request.block_number, input_time, true);
-            memory::print_stats("Input generation peak memory used: ");
-            input
-        }
-    };
-
-    let params = EnqueueTaskParams {
-        chain_id: l1_chain_spec.chain_id,
-        proof_system: proof_request.proof_type.clone().into(),
-        block_number: proof_request.block_number,
-        submitter: proof_request.prover.to_string(),
-        blockhash: input.block_hash_reference,
-        parent_hash: input.block_header_reference.parent_hash,
-        state_root: input.block_header_reference.state_root,
-        gas_used: input.block_header_reference.gas_used as u64,
-        payload: input.taiko.tx_data,
-        num_transactions: input.taiko.tx_len,
-    };
-
     let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?;
     // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt)));
     let mut manager = db.manage()?;
 
     prover_state
         .task_channel
-        .try_send((proof_request, prover_state.opts))?;
+        .try_send((proof_request.clone(), prover_state.opts))?;
 
-    manager.enqueue_task(params)?;
+    manager.enqueue_task(l1_chain_spec.chain_id, &proof_request)?;
     Ok(Json(serde_json::json!("{}")))
 }
diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml
index 32d88ed5c..d0e7c50fc 100644
--- a/task_manager/Cargo.toml
+++ b/task_manager/Cargo.toml
@@ -12,6 +12,7 @@ num_enum = { workspace = true }
 chrono = { workspace = true, features = ["serde"] }
 thiserror = { workspace = true }
 serde = { workspace = true }
+serde_json = { workspace = true }
 
 [dev-dependencies]
 rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::<u8>()
diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs
index de4032eea..78d981921 100644
--- a/task_manager/src/lib.rs
+++ b/task_manager/src/lib.rs
@@ -160,7 +160,7 @@ use std::{
 
 use chrono::{DateTime, Utc};
 use num_enum::{FromPrimitive, IntoPrimitive};
-use raiko_core::interfaces::ProofType;
+use raiko_core::interfaces::{ProofRequest, ProofType};
 use raiko_lib::primitives::{BlockNumber, ChainId, B256};
 use rusqlite::{
     Error as SqlError, {named_params, Statement}, {Connection, OpenFlags},
@@ -170,12 +170,14 @@ use serde::Serialize;
 // Types
 // ----------------------------------------------------------------
 
-#[derive(PartialEq, Debug, thiserror::Error)]
+#[derive(Debug, thiserror::Error)]
 pub enum TaskManagerError {
     #[error("IO Error {0}")]
     IOError(IOErrorKind),
     #[error("SQL Error {0}")]
     SqlError(String),
+    #[error("Serde Error {0}")]
+    SerdeError(#[from] serde_json::error::Error),
 }
 
 pub type TaskManagerResult<T> = Result<T, TaskManagerError>;
 
@@ -212,9 +214,10 @@ pub struct 
TaskManager<'db> { #[derive(Debug, Copy, Clone)] pub enum TaskProofsys { - Risc0 = 0, - SP1 = 1, - SGX = 2, + Native = 0, + Risc0 = 1, + SP1 = 2, + SGX = 3, } impl From for TaskProofsys { @@ -223,7 +226,18 @@ impl From for TaskProofsys { ProofType::Sp1 => Self::SP1, ProofType::Sgx => Self::SGX, ProofType::Risc0 => Self::Risc0, - ProofType::Native => unreachable!(), + ProofType::Native => Self::Native, + } + } +} + +impl From for ProofType { + fn from(val: TaskProofsys) -> Self { + match val { + TaskProofsys::Native => ProofType::Native, + TaskProofsys::Risc0 => ProofType::Risc0, + TaskProofsys::SP1 => ProofType::Sp1, + TaskProofsys::SGX => ProofType::Sgx, } } } @@ -298,116 +312,84 @@ impl TaskDb { r#" -- Metadata and mappings ----------------------------------------------- - CREATE TABLE metadata( - key BLOB UNIQUE NOT NULL PRIMARY KEY, - value BLOB + key BLOB UNIQUE NOT NULL PRIMARY KEY, + value BLOB ); - + INSERT INTO - metadata(key, value) + metadata(key, value) VALUES - ('task_db_version', 0); - + ('task_db_version', 0); + CREATE TABLE proofsys( - id_proofsys INTEGER UNIQUE NOT NULL PRIMARY KEY, - desc TEXT NOT NULL + id INTEGER UNIQUE NOT NULL PRIMARY KEY, + desc TEXT NOT NULL ); - + INSERT INTO - proofsys(id_proofsys, desc) + proofsys(id, desc) VALUES - (0, 'Risc0'), - (1, 'SP1'), - (2, 'SGX'); - + (0, 'Native'), + (1, 'Risc0'), + (2, 'SP1'), + (3, 'SGX'); + CREATE TABLE status_codes( - id_status INTEGER UNIQUE NOT NULL PRIMARY KEY, - desc TEXT NOT NULL + id INTEGER UNIQUE NOT NULL PRIMARY KEY, + desc TEXT NOT NULL ); - + INSERT INTO - status_codes(id_status, desc) + status_codes(id, desc) VALUES - ( 0, 'Success'), - ( 1000, 'Registered'), - ( 2000, 'Work-in-progress'), - (-1000, 'Proof failure (generic)'), - (-1100, 'Proof failure (Out-Of-Memory)'), - (-2000, 'Network failure'), - (-3000, 'Cancelled'), - (-3100, 'Cancelled (never started)'), - (-3200, 'Cancelled (aborted)'), - (-3210, 'Cancellation in progress'), - (-4000, 'Invalid or unsupported block'), - (-9999, 'Unspecified failure reason'); - + (0, 'Success'), + (1000, 'Registered'), + (2000, 'Work-in-progress'), + (-1000, 'Proof failure (generic)'), + (-1100, 'Proof failure (Out-Of-Memory)'), + (-2000, 'Network failure'), + (-3000, 'Cancelled'), + (-3100, 'Cancelled (never started)'), + (-3200, 'Cancelled (aborted)'), + (-3210, 'Cancellation in progress'), + (-4000, 'Invalid or unsupported block'), + (-9999, 'Unspecified failure reason'); + -- Data ----------------------------------------------- - - -- Different blockchains might have the same blockhash in case of a fork - -- for example Ethereum and Ethereum Classic. - -- As "GuestInput" refers to ChainID, the proving task would be different. - CREATE TABLE blocks( - chain_id INTEGER NOT NULL, - blockhash BLOB NOT NULL, - block_number INTEGER NOT NULL, - parent_hash BLOB NOT NULL, - state_root BLOB NOT NULL, - num_transactions INTEGER NOT NULL, - gas_used INTEGER NOT NULL, - PRIMARY KEY (chain_id, blockhash) - ); - -- Notes: -- 1. a blockhash may appear as many times as there are prover backends. - -- 2. For query speed over (chain_id, blockhash, id_proofsys) + -- 2. 
For query speed over (chain_id, blockhash) -- there is no need to create an index as the UNIQUE constraint -- has an implied index, see: -- - https://sqlite.org/lang_createtable.html#uniqueconst -- - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices CREATE TABLE tasks( - id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, - chain_id INTEGER NOT NULL, - blockhash BLOB NOT NULL, - id_proofsys INTEGER NOT NULL, - FOREIGN KEY(chain_id, blockhash) REFERENCES blocks(chain_id, blockhash) - FOREIGN KEY(id_proofsys) REFERENCES proofsys(id_proofsys) - UNIQUE (chain_id, blockhash, id_proofsys) - ); - - -- Payloads will be very large, just the block would be 1.77MB on L1 in Jan 2024, - -- https://ethresear.ch/t/on-block-sizes-gas-limits-and-scalability/18444 - -- mandating ideally a separated high-performance KV-store to reduce IO. - -- This is without EIP-4844 blobs and the extra input for zkVMs. - CREATE TABLE task_payloads( - id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, - payload BLOB NOT NULL, - FOREIGN KEY(id_task) REFERENCES tasks(id_task) + id INTEGER UNIQUE NOT NULL PRIMARY KEY, + chain_id INTEGER NOT NULL, + block_number INTEGER NOT NULL, + proofsys_id INTEGER NOT NULL, + request BLOB, + FOREIGN KEY(proofsys_id) REFERENCES proofsys(id), + UNIQUE (chain_id, block_number, proofsys_id) ); - + -- Proofs might also be large, so we isolate them in a dedicated table CREATE TABLE task_proofs( - id_task INTEGER UNIQUE NOT NULL PRIMARY KEY, - proof BLOB NOT NULL, - FOREIGN KEY(id_task) REFERENCES tasks(id_task) - ); - - CREATE TABLE thirdparties( - id_thirdparty INTEGER UNIQUE NOT NULL PRIMARY KEY, - thirdparty_desc TEXT UNIQUE NOT NULL + task_id INTEGER UNIQUE NOT NULL PRIMARY KEY, + proof BLOB NOT NULL, + FOREIGN KEY(task_id) REFERENCES tasks(id) ); - + CREATE TABLE task_status( - id_task INTEGER NOT NULL, - id_thirdparty INTEGER, - id_status INTEGER NOT NULL, - timestamp TIMESTAMP DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) NOT NULL, - FOREIGN KEY(id_task) REFERENCES tasks(id_task) - FOREIGN KEY(id_thirdparty) REFERENCES thirdparties(id_thirdparty) - FOREIGN KEY(id_status) REFERENCES status_codes(id_status) + task_id INTEGER NOT NULL, + status_id INTEGER NOT NULL, + timestamp TIMESTAMP DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) NOT NULL, + FOREIGN KEY(task_id) REFERENCES tasks(id), + FOREIGN KEY(status_id) REFERENCES status_codes(id), + UNIQUE (task_id, timestamp) ); - "#, )?; @@ -419,49 +401,28 @@ impl TaskDb { conn.execute_batch( r#" CREATE VIEW enqueue_task AS - SELECT - t.id_task, - t.chain_id, - t.blockhash, - t.id_proofsys, - ts.id_status, - ts.id_thirdparty AS submitter, - t3p.thirdparty_desc, - b.block_number, - b.parent_hash, - b.state_root, - b.num_transactions, - b.gas_used, - tpl.payload - FROM - tasks t - LEFT JOIN - blocks b on ( - b.chain_id = t.chain_id - AND b.blockhash = t.blockhash - ) - LEFT JOIN - task_status ts on ts.id_task = t.id_task - LEFT JOIN - task_payloads tpl on tpl.id_task = t.id_task - LEFT JOIN - thirdparties t3p on t3p.id_thirdparty = ts.id_thirdparty; - + SELECT + t.id, + t.chain_id, + t.block_number, + t.proofsys_id, + t.request + FROM + tasks t + LEFT JOIN task_status ts on ts.task_id = t.id; + CREATE VIEW update_task_progress AS - SELECT - t.id_task, - t.chain_id, - t.blockhash, - t.id_proofsys, - ts.id_status, - ts.id_thirdparty AS fulfiller, - tpf.proof - FROM - tasks t - LEFT JOIN - task_status ts on ts.id_task = t.id_task - LEFT JOIN - task_proofs tpf on tpf.id_task = t.id_task; + SELECT + t.id, + t.chain_id, + t.block_number, + 
t.proofsys_id, + ts.status_id, + tpf.proof + FROM + tasks t + LEFT JOIN task_status ts on ts.task_id = t.id + LEFT JOIN task_proofs tpf on tpf.task_id = t.id; "#, )?; @@ -512,101 +473,130 @@ impl TaskDb { conn.execute_batch( r#" -- PRAGMA temp_store = 'MEMORY'; - - CREATE TEMPORARY TABLE temp.current_task(id_task INTEGER); - - CREATE TEMPORARY TRIGGER enqueue_task_insert_trigger - INSTEAD OF INSERT ON enqueue_task - BEGIN - INSERT INTO blocks(chain_id, blockhash, block_number, parent_hash, state_root, num_transactions, gas_used) - VALUES (new.chain_id, new.blockhash, new.block_number, new.parent_hash, new.state_root, new.num_transactions, new.gas_used); - - INSERT INTO tasks(chain_id, blockhash, id_proofsys) - VALUES (new.chain_id, new.blockhash, new.id_proofsys); - - INSERT INTO current_task - SELECT id_task FROM tasks - WHERE rowid = last_insert_rowid() - LIMIT 1; - - INSERT INTO task_payloads(id_task, payload) - SELECT tmp.id_task, new.payload - FROM current_task tmp - LIMIT 1; - - INSERT OR IGNORE INTO thirdparties(thirdparty_desc) - VALUES (new.submitter); - - -- Tasks are initialized at status 1000 - registered - -- timestamp is auto-filled with datetime('now'), see its field definition - INSERT INTO task_status(id_task, id_thirdparty, id_status) - SELECT tmp.id_task, t3p.id_thirdparty, 1000 - FROM current_task tmp - JOIN thirdparties t3p - WHERE t3p.thirdparty_desc = new.submitter - LIMIT 1; - - DELETE FROM current_task; - END; - - CREATE TEMPORARY TRIGGER update_task_progress_trigger - INSTEAD OF INSERT ON update_task_progress - BEGIN - INSERT INTO current_task - SELECT id_task - FROM tasks - WHERE 1=1 - AND chain_id = new.chain_id - AND blockhash = new.blockhash - AND id_proofsys = new.id_proofsys - LIMIT 1; - - -- If fulfiller is NULL, due to IGNORE and the NOT NULL requirement, - -- table will be left as-is. - INSERT OR IGNORE INTO thirdparties(thirdparty_desc) - VALUES (new.fulfiller); - - -- timestamp is auto-filled with datetime('now'), see its field definition - INSERT INTO task_status(id_task, id_thirdparty, id_status) - SELECT tmp.id_task, t3p.id_thirdparty, new.id_status - FROM current_task tmp - LEFT JOIN thirdparties t3p - -- fulfiller can be NULL, for example - -- for tasks Cancelled before they were ever sent to a prover. 
- ON t3p.thirdparty_desc = new.fulfiller - LIMIT 1; - - INSERT OR REPLACE INTO task_proofs - SELECT id_task, new.proof - FROM current_task - WHERE new.proof IS NOT NULL - LIMIT 1; - - DELETE FROM current_task; - END; - "#)?; + CREATE TEMPORARY TABLE temp.current_task(task_id INTEGER); + + CREATE TEMPORARY TRIGGER enqueue_task_insert_trigger INSTEAD OF + INSERT + ON enqueue_task + BEGIN + INSERT INTO + tasks(chain_id, block_number, proofsys_id, request) + VALUES + ( + new.chain_id, + new.block_number, + new.proofsys_id, + new.request + ); + + INSERT INTO + current_task + SELECT + id + FROM + tasks + WHERE + rowid = last_insert_rowid() + LIMIT + 1; + + -- Tasks are initialized at status 1000 - registered + -- timestamp is auto-filled with datetime('now'), see its field definition + INSERT INTO + task_status(task_id, status_id) + SELECT + tmp.task_id, + 1000 + FROM + current_task tmp; + + DELETE FROM + current_task; + END; + + CREATE TEMPORARY TRIGGER update_task_progress_trigger INSTEAD OF + INSERT + ON update_task_progress + BEGIN + INSERT INTO + current_task + SELECT + id + FROM + tasks + WHERE + chain_id = new.chain_id + AND block_number = new.block_number + AND proofsys_id = new.proofsys_id + LIMIT + 1; + + -- timestamp is auto-filled with datetime('now'), see its field definition + INSERT INTO + task_status(task_id, status_id) + SELECT + tmp.task_id, + new.status_id + FROM + current_task tmp + LIMIT + 1; + + INSERT + OR REPLACE INTO task_proofs + SELECT + task_id, + new.proof + FROM + current_task + WHERE + new.proof IS NOT NULL + LIMIT + 1; + + DELETE FROM + current_task; + END; + "#, + )?; let enqueue_task = conn.prepare( " - INSERT INTO enqueue_task( - chain_id, blockhash, id_proofsys, submitter, - block_number, parent_hash, state_root, num_transactions, gas_used, - payload) - VALUES ( - :chain_id, :blockhash, :id_proofsys, :submitter, - :block_number, :parent_hash, :state_root, :num_transactions, :gas_used, - :payload); + INSERT INTO + enqueue_task( + chain_id, + block_number, + proofsys_id, + request + ) + VALUES + ( + :chain_id, + :block_number, + :proofsys_id, + :request + ); ", )?; let update_task_progress = conn.prepare( " - INSERT INTO update_task_progress( - chain_id, blockhash, id_proofsys, - fulfiller, id_status, proof) - VALUES ( - :chain_id, :blockhash, :id_proofsys, - :fulfiller, :id_status, :proof); + INSERT INTO + update_task_progress( + chain_id, + block_number, + proofsys_id, + status_id, + proof + ) + VALUES + ( + :chain_id, + :block_number, + :proofsys_id, + :status_id, + :proof + ); ", )?; @@ -618,109 +608,100 @@ impl TaskDb { let get_db_size = conn.prepare( " SELECT - name as table_name, - SUM(pgsize) as table_size - FROM dbstat - GROUP BY table_name - ORDER BY SUM(pgsize) DESC; + name as table_name, + SUM(pgsize) as table_size + FROM + dbstat + GROUP BY + table_name + ORDER BY + SUM(pgsize) DESC; ", )?; let get_task_proof = conn.prepare( " - SELECT proof - FROM task_proofs tp - LEFT JOIN - tasks t ON tp.id_task = t.id_task - WHERE 1=1 - AND t.chain_id = :chain_id - AND t.blockhash = :blockhash - AND t.id_proofsys = :id_proofsys - LIMIT 1; + SELECT + proof + FROM + task_proofs tp + LEFT JOIN tasks t ON tp.task_id = t.id + WHERE + t.chain_id = :chain_id + AND t.block_number = :block_number + AND t.proofsys_id = :proofsys_id + LIMIT + 1; ", )?; let get_task_proof_by_id = conn.prepare( " - SELECT proof - FROM task_proofs tp - LEFT JOIN - tasks t ON tp.id_task = t.id_task - WHERE 1=1 - AND t.id_task = :task_id - LIMIT 1; + SELECT + proof + FROM + task_proofs tp + 
LEFT JOIN tasks t ON tp.task_id = t.id + WHERE + t.id= :task_id + LIMIT + 1; ", )?; let get_task_proving_status = conn.prepare( " SELECT - t3p.thirdparty_desc, - ts.id_status, - MAX(timestamp) + ts.status_id, + timestamp FROM - task_status ts - LEFT JOIN - tasks t ON ts.id_task = t.id_task - LEFT JOIN - thirdparties t3p ON ts.id_thirdparty = t3p.id_thirdparty - WHERE 1=1 - AND t.chain_id = :chain_id - AND t.blockhash = :blockhash - AND t.id_proofsys = :id_proofsys - GROUP BY - t3p.id_thirdparty + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + t.chain_id = :chain_id + AND t.block_number = :block_number + AND t.proofsys_id = :proofsys_id ORDER BY - ts.timestamp DESC; + ts.timestamp DESC; ", )?; let get_task_proving_status_by_id = conn.prepare( " SELECT - t3p.thirdparty_desc, - ts.id_status, - MAX(timestamp) + ts.status_id, + timestamp FROM - task_status ts - LEFT JOIN - tasks t ON ts.id_task = t.id_task - LEFT JOIN - thirdparties t3p ON ts.id_thirdparty = t3p.id_thirdparty - WHERE 1=1 - AND t.id_task = :task_id - GROUP BY - t3p.id_thirdparty + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + t.id = :task_id ORDER BY - ts.timestamp DESC; + ts.timestamp DESC; ", )?; let get_tasks_unfinished = conn.prepare( " SELECT - t.chain_id, - t.blockhash, - t.id_proofsys, - t3p.thirdparty_desc, - ts.id_status, - MAX(timestamp) + t.chain_id, + t.block_number, + t.proofsys_id, + ts.status_id, + timestamp FROM - task_status ts - LEFT JOIN - tasks t ON ts.id_task = t.id_task - LEFT JOIN - thirdparties t3p ON ts.id_thirdparty = t3p.id_thirdparty - WHERE 1=1 - AND id_status NOT IN ( - 0, -- Success - -3000, -- Cancelled - -3100, -- Cancelled (never started) - -3200 -- Cancelled (aborted) - -- What do we do with -4000 Invalid/unsupported blocks? - -- And -9999 Unspecified failure reason? - -- For now we return them until we know more of the failure modes - ); + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + status_id NOT IN ( + 0, -- Success + -3000, -- Cancelled + -3100, -- Cancelled (never started) + -3200 -- Cancelled (aborted) + -- What do we do with -4000 Invalid/unsupported blocks? + -- And -9999 Unspecified failure reason? + -- For now we return them until we know more of the failure modes + ); ", )?; @@ -751,54 +732,33 @@ pub struct EnqueueTaskParams { pub payload: Vec, } -pub type TaskProvingStatus = Vec<(Option, TaskStatus, DateTime)>; +pub type TaskProvingStatus = Vec<(TaskStatus, DateTime)>; impl<'db> TaskManager<'db> { - pub fn enqueue_task( - &mut self, - EnqueueTaskParams { - chain_id, - blockhash, - proof_system, - submitter, - block_number, - parent_hash, - state_root, - num_transactions, - gas_used, - payload, - }: EnqueueTaskParams, - ) -> TaskManagerResult<()> { + pub fn enqueue_task(&mut self, chain_id: u64, request: &ProofRequest) -> TaskManagerResult<()> { self.enqueue_task.execute(named_params! 
{
            ":chain_id": chain_id,
-            ":blockhash": blockhash.as_slice(),
-            ":id_proofsys": proof_system as u8,
-            ":submitter": submitter,
-            ":block_number": block_number,
-            ":parent_hash": parent_hash.as_slice(),
-            ":state_root": state_root.as_slice(),
-            ":num_transactions": num_transactions,
-            ":gas_used": gas_used,
-            ":payload": payload,
+            ":block_number": request.block_number,
+            ":proofsys_id": TaskProofsys::from(request.proof_type) as u8,
+            ":request": serde_json::to_vec(&request)?,
         })?;
+
         Ok(())
     }

     pub fn update_task_progress(
         &mut self,
         chain_id: ChainId,
-        blockhash: &B256,
-        proof_system: TaskProofsys,
-        fulfiller: Option<&str>,
+        block_number: u64,
+        proof_type: ProofType,
         status: TaskStatus,
         proof: Option<&[u8]>,
     ) -> TaskManagerResult<()> {
         self.update_task_progress.execute(named_params! {
             ":chain_id": chain_id,
-            ":blockhash": blockhash.as_slice(),
-            ":id_proofsys": proof_system as u8,
-            ":fulfiller": fulfiller,
-            ":id_status": status as i32,
+            ":block_number": block_number,
+            ":proofsys_id": TaskProofsys::from(proof_type) as u8,
+            ":status_id": status as i32,
             ":proof": proof
         })?;
         Ok(())
     }

@@ -808,26 +768,24 @@ impl<'db> TaskManager<'db> {
     pub fn get_task_proving_status(
         &mut self,
         chain_id: ChainId,
-        blockhash: &B256,
-        proof_system: TaskProofsys,
+        block_number: u64,
+        proof_type: ProofType,
     ) -> TaskManagerResult<TaskProvingStatus> {
         let rows = self.get_task_proving_status.query_map(
             named_params! {
                 ":chain_id": chain_id,
-                ":blockhash": blockhash.as_slice(),
-                ":id_proofsys": proof_system as u8,
+                ":block_number": block_number,
+                ":proofsys_id": TaskProofsys::from(proof_type) as u8,
             },
             |row| {
                 Ok((
-                    row.get::<_, Option<String>>(0)?,
-                    TaskStatus::from(row.get::<_, i32>(1)?),
-                    row.get::<_, DateTime<Utc>>(2)?,
+                    TaskStatus::from(row.get::<_, i32>(0)?),
+                    row.get::<_, DateTime<Utc>>(1)?,
                 ))
             },
         )?;
-        let proving_status = rows.collect::<Result<Vec<_>, _>>()?;
-        Ok(proving_status)
+        Ok(rows.collect::<Result<Vec<_>, _>>()?)
     }

     /// Returns the latest triplet (submitter or fulfiller, status, last update time)
@@ -841,9 +799,8 @@
             },
             |row| {
                 Ok((
-                    row.get::<_, Option<String>>(0)?,
-                    TaskStatus::from(row.get::<_, i32>(1)?),
-                    row.get::<_, DateTime<Utc>>(2)?,
+                    TaskStatus::from(row.get::<_, i32>(0)?),
+                    row.get::<_, DateTime<Utc>>(1)?,
                 ))
             },
         )?;
@@ -855,14 +812,14 @@ impl<'db> TaskManager<'db> {
     pub fn get_task_proof(
         &mut self,
         chain_id: ChainId,
-        blockhash: &B256,
-        proof_system: TaskProofsys,
+        block_number: u64,
+        proof_type: ProofType,
     ) -> TaskManagerResult<Vec<u8>> {
         let proof = self.get_task_proof.query_row(
             named_params!
{ ":chain_id": chain_id, - ":blockhash": blockhash.as_slice(), - ":id_proofsys": proof_system as u8, + ":block_number": block_number, + ":proofsys_id": TaskProofsys::from(proof_type) as u8, }, |r| r.get(0), )?; diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 8ac4823f5..31b61eeaf 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -7,40 +7,41 @@ #[cfg(test)] mod tests { - use std::time::Duration; + use std::{collections::HashMap, time::Duration}; + use alloy_primitives::Address; + use raiko_core::interfaces::{ProofRequest, ProofType}; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; use raiko_lib::primitives::B256; - use raiko_task_manager::{EnqueueTaskParams, TaskDb, TaskProofsys, TaskStatus}; - - fn create_random_task(submitter: String) -> EnqueueTaskParams { - let mut rng = ChaCha8Rng::seed_from_u64(123); + use raiko_task_manager::{TaskDb, TaskStatus}; + fn create_random_task(rng: &mut ChaCha8Rng) -> (u64, ProofRequest) { let chain_id = 100; - let blockhash = B256::random(); - let proof_system = TaskProofsys::Risc0; + let proof_type = match rng.gen_range(0..4) { + 0 => ProofType::Native, + 1 => ProofType::Sgx, + 2 => ProofType::Sp1, + _ => ProofType::Risc0, + }; let block_number = rng.gen_range(1..4_000_000); - let parent_hash = B256::random(); - let state_root = B256::random(); - let num_transactions = rng.gen_range(0..1000); - let gas_used = rng.gen_range(0..100_000_000); - let payload_length = rng.gen_range(1_000_000..10_000_000); - let payload: Vec = (&mut rng).gen_iter::().take(payload_length).collect(); - - EnqueueTaskParams { + let graffiti = B256::random(); + let prover_args = HashMap::new(); + let prover = Address::random(); + + ( chain_id, - blockhash, - proof_system, - submitter, - block_number, - parent_hash, - state_root, - num_transactions, - gas_used, - payload, - } + ProofRequest { + block_number, + network: "network".to_string(), + l1_network: "l1_network".to_string(), + graffiti, + prover, + proof_type, + prover_args, + }, + ) } #[test] @@ -62,40 +63,8 @@ mod tests { // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); let mut tama = db.manage().unwrap(); - tama.enqueue_task(create_random_task("test_enqueue_task".to_owned())) - .unwrap(); - } - - #[test] - fn test_get_db_size() { - // Materialized local DB - let dir = std::env::current_dir().unwrap().join("tests"); - let file = dir.as_path().join("test_get_db_size.sqlite"); - if file.exists() { - std::fs::remove_file(&file).unwrap() - }; - - // // temp dir DB - // use tempfile::tempdir; - // let dir = tempdir().unwrap(); - // let file = dir.path().join("test_get_db_size.sqlite"); - - #[allow(unused_mut)] - let mut db = TaskDb::open_or_create(&file).unwrap(); - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut tama = db.manage().unwrap(); - - let mut rng = ChaCha8Rng::seed_from_u64(123); - - for _ in 0..42 { - let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10)); - - tama.enqueue_task(create_random_task(submitter)).unwrap(); - } - - let (db_size, db_tables_size) = tama.get_db_size().unwrap(); - println!("db_tables_size: {:?}", db_tables_size); - assert!(db_size / 1024 / 1024 > 40); + let (chain_id, request) = create_random_task(&mut ChaCha8Rng::seed_from_u64(123)); + tama.enqueue_task(chain_id, &request).unwrap(); } #[test] @@ -123,55 +92,52 @@ mod tests { let mut tasks = vec![]; for _ in 0..5 { - let submitter = format!("test_get_db_size/{}", rng.gen_range(1..10)); - let task = 
create_random_task(submitter.clone()); + let (chain_id, request) = create_random_task(&mut rng); - tama.enqueue_task(task.clone()).unwrap(); + tama.enqueue_task(chain_id, &request).unwrap(); let task_status = tama - .get_task_proving_status(task.chain_id, &task.blockhash, task.proof_system) + .get_task_proving_status(chain_id, request.block_number, request.proof_type) .unwrap(); assert_eq!(task_status.len(), 1); - let (submitter_name, status, _) = task_status + let (status, _) = task_status .first() .expect("Already confirmed there is exactly 1 element"); - assert_eq!(submitter_name, &Some(submitter.clone())); assert_eq!(status, &TaskStatus::Registered); - tasks.push((task.chain_id, task.blockhash, task.proof_system, submitter)); + tasks.push((chain_id, request.block_number, request.proof_type)); } std::thread::sleep(Duration::from_millis(1)); { + let task_status = tama + .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].2) + .unwrap(); + println!("{task_status:?}"); tama.update_task_progress( tasks[0].0, - &tasks[0].1, + tasks[0].1, tasks[0].2, - None, TaskStatus::Cancelled_NeverStarted, None, ) .unwrap(); - { - let task_status = tama - .get_task_proving_status(tasks[0].0, &tasks[0].1, tasks[0].2) - .unwrap(); - assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, None); - assert_eq!(task_status[0].1, TaskStatus::Cancelled_NeverStarted); - assert_eq!(task_status[1].0, Some(tasks[0].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); - } + let task_status = tama + .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].2) + .unwrap(); + println!("{task_status:?}"); + assert_eq!(task_status.len(), 2); + assert_eq!(task_status[0].0, TaskStatus::Cancelled_NeverStarted); + assert_eq!(task_status[1].0, TaskStatus::Registered); } // ----------------------- { tama.update_task_progress( tasks[1].0, - &tasks[1].1, + tasks[1].1, tasks[1].2, - Some("A prover Network"), TaskStatus::WorkInProgress, None, ) @@ -179,22 +145,19 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2) + .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].2) .unwrap(); assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); - assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, Some(tasks[1].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); + assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); tama.update_task_progress( tasks[1].0, - &tasks[1].1, + tasks[1].1, tasks[1].2, - Some("A prover Network"), TaskStatus::CancellationInProgress, None, ) @@ -202,22 +165,20 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2) + .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].2) .unwrap(); - assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); - assert_eq!(task_status[0].1, TaskStatus::CancellationInProgress); - assert_eq!(task_status[1].0, Some(tasks[1].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); + assert_eq!(task_status.len(), 3); + assert_eq!(task_status[0].0, TaskStatus::CancellationInProgress); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[2].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); tama.update_task_progress( tasks[1].0, - 
&tasks[1].1, + tasks[1].1, tasks[1].2, - Some("A prover Network"), TaskStatus::Cancelled, None, ) @@ -225,13 +186,13 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, &tasks[1].1, tasks[1].2) + .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].2) .unwrap(); - assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, Some(String::from("A prover Network"))); - assert_eq!(task_status[0].1, TaskStatus::Cancelled); - assert_eq!(task_status[1].0, Some(tasks[1].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); + assert_eq!(task_status.len(), 4); + assert_eq!(task_status[0].0, TaskStatus::Cancelled); + assert_eq!(task_status[1].0, TaskStatus::CancellationInProgress); + assert_eq!(task_status[2].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[3].0, TaskStatus::Registered); } } @@ -239,9 +200,8 @@ mod tests { { tama.update_task_progress( tasks[2].0, - &tasks[2].1, + tasks[2].1, tasks[2].2, - Some("A based prover"), TaskStatus::WorkInProgress, None, ) @@ -249,13 +209,11 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2) + .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].2) .unwrap(); assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, Some(String::from("A based prover"))); - assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, Some(tasks[2].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); + assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -263,9 +221,8 @@ mod tests { let proof: Vec<_> = (&mut rng).gen_iter::().take(128).collect(); tama.update_task_progress( tasks[2].0, - &tasks[2].1, + tasks[2].1, tasks[2].2, - Some("A based prover"), TaskStatus::Success, Some(&proof), ) @@ -273,18 +230,17 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[2].0, &tasks[2].1, tasks[2].2) + .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].2) .unwrap(); - assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, Some(String::from("A based prover"))); - assert_eq!(task_status[0].1, TaskStatus::Success); - assert_eq!(task_status[1].0, Some(tasks[2].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); + assert_eq!(task_status.len(), 3); + assert_eq!(task_status[0].0, TaskStatus::Success); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[2].0, TaskStatus::Registered); } assert_eq!( proof, - tama.get_task_proof(tasks[2].0, &tasks[2].1, tasks[2].2) + tama.get_task_proof(tasks[2].0, tasks[2].1, tasks[2].2) .unwrap() ); } @@ -293,9 +249,8 @@ mod tests { { tama.update_task_progress( tasks[3].0, - &tasks[3].1, + tasks[3].1, tasks[3].2, - Some("A flaky prover"), TaskStatus::WorkInProgress, None, ) @@ -303,22 +258,19 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) .unwrap(); assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, Some(String::from("A flaky prover"))); - assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, Some(tasks[3].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); + assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); 
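        // Note: these 1 ms sleeps keep the millisecond-resolution
        // STRFTIME('%Y-%m-%d %H:%M:%f') timestamps of consecutive status
        // updates distinct, which the UNIQUE (task_id, timestamp) constraint
        // on task_status requires, and which makes the ORDER BY timestamp DESC
        // history assertions below deterministic.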
tama.update_task_progress( tasks[3].0, - &tasks[3].1, + tasks[3].1, tasks[3].2, - Some("A flaky prover"), TaskStatus::NetworkFailure, None, ) @@ -326,22 +278,20 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) .unwrap(); - assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, Some(String::from("A flaky prover"))); - assert_eq!(task_status[0].1, TaskStatus::NetworkFailure); - assert_eq!(task_status[1].0, Some(tasks[3].3.clone())); - assert_eq!(task_status[1].1, TaskStatus::Registered); + assert_eq!(task_status.len(), 3); + assert_eq!(task_status[0].0, TaskStatus::NetworkFailure); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[2].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); tama.update_task_progress( tasks[3].0, - &tasks[3].1, + tasks[3].1, tasks[3].2, - Some("A based prover"), TaskStatus::WorkInProgress, None, ) @@ -349,15 +299,13 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) .unwrap(); - assert_eq!(task_status.len(), 3); - assert_eq!(task_status[0].0, Some(String::from("A based prover"))); - assert_eq!(task_status[0].1, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, Some(String::from("A flaky prover"))); - assert_eq!(task_status[1].1, TaskStatus::NetworkFailure); - assert_eq!(task_status[2].0, Some(tasks[3].3.clone())); - assert_eq!(task_status[2].1, TaskStatus::Registered); + assert_eq!(task_status.len(), 4); + assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[1].0, TaskStatus::NetworkFailure); + assert_eq!(task_status[2].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[3].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -365,9 +313,8 @@ mod tests { let proof: Vec<_> = (&mut rng).gen_iter::().take(128).collect(); tama.update_task_progress( tasks[3].0, - &tasks[3].1, + tasks[3].1, tasks[3].2, - Some("A based prover"), TaskStatus::Success, Some(&proof), ) @@ -375,20 +322,19 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, &tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) .unwrap(); - assert_eq!(task_status.len(), 3); - assert_eq!(task_status[0].0, Some(String::from("A based prover"))); - assert_eq!(task_status[0].1, TaskStatus::Success); - assert_eq!(task_status[1].0, Some(String::from("A flaky prover"))); - assert_eq!(task_status[1].1, TaskStatus::NetworkFailure); - assert_eq!(task_status[2].0, Some(tasks[3].3.clone())); - assert_eq!(task_status[2].1, TaskStatus::Registered); + assert_eq!(task_status.len(), 5); + assert_eq!(task_status[0].0, TaskStatus::Success); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[2].0, TaskStatus::NetworkFailure); + assert_eq!(task_status[3].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[4].0, TaskStatus::Registered); } assert_eq!( proof, - tama.get_task_proof(tasks[3].0, &tasks[3].1, tasks[3].2) + tama.get_task_proof(tasks[3].0, tasks[3].1, tasks[3].2) .unwrap() ); } From 4be8774da517ac912fef18149adc0529ff30bb65 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Thu, 20 Jun 2024 15:55:26 +0200 Subject: [PATCH 30/44] fix(clippy): fix dereference issue --- core/src/interfaces.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/core/src/interfaces.rs b/core/src/interfaces.rs index eff8c0b4f..795a0d7fb 100644 --- a/core/src/interfaces.rs +++ b/core/src/interfaces.rs @@ -149,7 +149,7 @@ impl ProofType { .await .map_err(|e| e.into()); #[cfg(not(feature = "sp1"))] - Err(RaikoError::FeatureNotSupportedError(self.clone())) + Err(RaikoError::FeatureNotSupportedError(*self)) } ProofType::Risc0 => { #[cfg(feature = "risc0")] @@ -157,7 +157,7 @@ impl ProofType { .await .map_err(|e| e.into()); #[cfg(not(feature = "risc0"))] - Err(RaikoError::FeatureNotSupportedError(self.clone())) + Err(RaikoError::FeatureNotSupportedError(*self)) } ProofType::Sgx => { #[cfg(feature = "sgx")] @@ -165,7 +165,7 @@ impl ProofType { .await .map_err(|e| e.into()); #[cfg(not(feature = "sgx"))] - Err(RaikoError::FeatureNotSupportedError(self.clone())) + Err(RaikoError::FeatureNotSupportedError(*self)) } } } From 5a38b0aa9125f1377e2cc894ccefdcc037cd75b2 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 24 Jun 2024 11:18:41 +0200 Subject: [PATCH 31/44] [WIP]: handle proof request by worker and update task status --- host/src/lib.rs | 226 +++++++++++++++++++++++-- host/src/server/api/mod.rs | 4 +- host/src/server/api/v1/mod.rs | 6 +- host/src/server/api/v2/proof/submit.rs | 8 +- 4 files changed, 225 insertions(+), 19 deletions(-) diff --git a/host/src/lib.rs b/host/src/lib.rs index 5171fe75c..a21d7819d 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -17,15 +17,29 @@ use anyhow::Context; use cap::Cap; use clap::Parser; use raiko_core::{ - interfaces::{ProofRequest, ProofRequestOpt}, + interfaces::{ProofRequest, ProofRequestOpt, RaikoError}, merge, + provider::rpc::RpcBlockDataProvider, + Raiko, }; -use raiko_lib::consts::SupportedChainSpecs; +use raiko_lib::{consts::SupportedChainSpecs, Measurement}; +use raiko_task_manager::{TaskDb, TaskStatus}; use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::sync::mpsc; - -use crate::interfaces::HostResult; +use tracing::info; + +use crate::{ + interfaces::{HostError, HostResult}, + metrics::{ + inc_guest_error, inc_guest_req_count, inc_guest_success, inc_host_error, + inc_host_req_count, observe_guest_time, observe_prepare_input_time, observe_total_time, + }, + server::api::v1::{ + proof::{get_cached_input, set_cached_input, validate_cache_input}, + ProofResponse, + }, +}; pub mod interfaces; pub mod metrics; @@ -127,11 +141,13 @@ impl Cli { } } +type TaskChannelOpts = (ProofRequest, Cli, SupportedChainSpecs); + #[derive(Debug, Clone)] pub struct ProverState { pub opts: Cli, pub chain_specs: SupportedChainSpecs, - pub task_channel: mpsc::Sender<(ProofRequest, Cli)>, + pub task_channel: mpsc::Sender, } impl ProverState { @@ -154,12 +170,103 @@ impl ProverState { } } - let (task_channel, mut receiver) = mpsc::channel(opts.concurrency_limit); - - tokio::spawn(async move { - while let Some(_proof_request_opt) = receiver.recv().await { - // TODO:(petar) implement proof request handler here - todo!(); + let (task_channel, mut receiver) = mpsc::channel::(opts.concurrency_limit); + + let _spawn = tokio::spawn(async move { + while let Some((proof_request, opts, chain_specs)) = receiver.recv().await { + let proof_request_clone = proof_request.clone(); + let opts_clone = opts.clone(); + let chain_specs_clone = chain_specs.clone(); + + let proof_result: HostResult = async move { + { + let db = TaskDb::open_or_create(&opts_clone.sqlite_file)?; + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut manager = db.manage()?; + let taiko_chain_spec = 
chain_specs_clone + .get_chain_spec(&proof_request_clone.network.to_string()) + .ok_or_else(|| { + HostError::InvalidRequestConfig( + "Unsupported raiko network".to_string(), + ) + })?; + manager.update_task_progress( + taiko_chain_spec.chain_id, + proof_request.block_number, + proof_request.proof_type, + TaskStatus::WorkInProgress, + None, + )?; + } + handle_proof(&proof_request_clone, &opts_clone, &chain_specs_clone).await + } + .await; + match proof_result { + Ok(proof) => { + let _: HostResult<()> = async move { + let db = TaskDb::open_or_create(&opts.sqlite_file)?; + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut manager = db.manage()?; + let taiko_chain_spec = chain_specs + .get_chain_spec(&proof_request.network.to_string()) + .ok_or_else(|| { + HostError::InvalidRequestConfig( + "Unsupported raiko network".to_string(), + ) + })?; + let proof = proof.proof.unwrap(); + let proof = proof.as_bytes(); + manager.update_task_progress( + taiko_chain_spec.chain_id, + proof_request.block_number, + proof_request.proof_type, + TaskStatus::WorkInProgress, + Some(proof), + )?; + Ok(()) + } + .await; + } + Err(error) => { + let _: HostResult<()> = async move { + let db = TaskDb::open_or_create(&opts.sqlite_file)?; + // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + let mut manager = db.manage()?; + let taiko_chain_spec = chain_specs + .get_chain_spec(&proof_request.network.to_string()) + .ok_or_else(|| { + HostError::InvalidRequestConfig( + "Unsupported raiko network".to_string(), + ) + })?; + + manager.update_task_progress( + taiko_chain_spec.chain_id, + proof_request.block_number, + proof_request.proof_type, + match error { + HostError::HandleDropped + | HostError::CapacityFull + | HostError::JoinHandle(_) + | HostError::InvalidAddress(_) + | HostError::InvalidRequestConfig(_) => unreachable!(), + HostError::Conversion(_) + | HostError::Serde(_) + | HostError::Core(_) + | HostError::Anyhow(_) + | HostError::FeatureNotSupportedError(_) + | HostError::Io(_) => TaskStatus::UnspecifiedFailureReason, + HostError::RPC(_) => TaskStatus::NetworkFailure, + HostError::Guest(_) => TaskStatus::ProofFailure_Generic, + HostError::TaskManager(_) => TaskStatus::SqlDbCorruption, + }, + None, + )?; + Ok(()) + } + .await; + } + } } }); @@ -171,6 +278,103 @@ impl ProverState { } } +pub async fn handle_proof( + proof_request: &ProofRequest, + opts: &Cli, + chain_specs: &SupportedChainSpecs, +) -> HostResult { + inc_host_req_count(proof_request.block_number); + inc_guest_req_count(&proof_request.proof_type, proof_request.block_number); + + info!( + "# Generating proof for block {} on {}", + proof_request.block_number, proof_request.network + ); + + // Check for a cached input for the given request config. + let cached_input = get_cached_input( + &opts.cache_path, + proof_request.block_number, + &proof_request.network.to_string(), + ); + + let l1_chain_spec = chain_specs + .get_chain_spec(&proof_request.l1_network.to_string()) + .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported l1 network".to_string()))?; + + let taiko_chain_spec = chain_specs + .get_chain_spec(&proof_request.network.to_string()) + .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported raiko network".to_string()))?; + + // Execute the proof generation. 
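    // Note: the provider below is deliberately created at block_number - 1;
    // input generation replays the requested block on top of the parent
    // block's state, so RPC state queries are anchored one block back.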
+ let total_time = Measurement::start("", false); + + let raiko = Raiko::new( + l1_chain_spec.clone(), + taiko_chain_spec.clone(), + proof_request.clone(), + ); + let provider = RpcBlockDataProvider::new( + &taiko_chain_spec.rpc.clone(), + proof_request.block_number - 1, + )?; + let input = match validate_cache_input(cached_input, &provider).await { + Ok(cache_input) => cache_input, + Err(_) => { + // no valid cache + memory::reset_stats(); + let measurement = Measurement::start("Generating input...", false); + let input = raiko.generate_input(provider).await?; + let input_time = measurement.stop_with("=> Input generated"); + observe_prepare_input_time(proof_request.block_number, input_time, true); + memory::print_stats("Input generation peak memory used: "); + input + } + }; + memory::reset_stats(); + let output = raiko.get_output(&input)?; + memory::print_stats("Guest program peak memory used: "); + + memory::reset_stats(); + let measurement = Measurement::start("Generating proof...", false); + let proof = raiko.prove(input.clone(), &output).await.map_err(|e| { + let total_time = total_time.stop_with("====> Proof generation failed"); + observe_total_time(proof_request.block_number, total_time, false); + match e { + RaikoError::Guest(e) => { + inc_guest_error(&proof_request.proof_type, proof_request.block_number); + HostError::Core(e.into()) + } + e => { + inc_host_error(proof_request.block_number); + e.into() + } + } + })?; + let guest_time = measurement.stop_with("=> Proof generated"); + observe_guest_time( + &proof_request.proof_type, + proof_request.block_number, + guest_time, + true, + ); + memory::print_stats("Prover peak memory used: "); + + inc_guest_success(&proof_request.proof_type, proof_request.block_number); + let total_time = total_time.stop_with("====> Complete proof generated"); + observe_total_time(proof_request.block_number, total_time, true); + + // Cache the input for future use. + set_cached_input( + &opts.cache_path, + proof_request.block_number, + &proof_request.network.to_string(), + &input, + )?; + + ProofResponse::try_from(proof) +} + mod memory { use tracing::debug; diff --git a/host/src/server/api/mod.rs b/host/src/server/api/mod.rs index 226ef0a4a..806698a95 100644 --- a/host/src/server/api/mod.rs +++ b/host/src/server/api/mod.rs @@ -16,8 +16,8 @@ use tower_http::{ use crate::ProverState; -mod v1; -mod v2; +pub mod v1; +pub mod v2; pub fn create_router(concurrency_limit: usize, jwt_secret: Option<&str>) -> Router { let cors = CorsLayer::new() diff --git a/host/src/server/api/v1/mod.rs b/host/src/server/api/v1/mod.rs index e621c35b8..9141b508c 100644 --- a/host/src/server/api/v1/mod.rs +++ b/host/src/server/api/v1/mod.rs @@ -53,11 +53,11 @@ pub struct Docs; pub struct ProofResponse { #[schema(value_type = Option)] /// The output of the prover. - output: Option, + pub output: Option, /// The proof. - proof: Option, + pub proof: Option, /// The quote. 
- quote: Option, + pub quote: Option, } impl IntoResponse for ProofResponse { diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof/submit.rs index f7b54410d..eebe66de2 100644 --- a/host/src/server/api/v2/proof/submit.rs +++ b/host/src/server/api/v2/proof/submit.rs @@ -56,9 +56,11 @@ async fn submit_handler( // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); let mut manager = db.manage()?; - prover_state - .task_channel - .try_send((proof_request.clone(), prover_state.opts))?; + prover_state.task_channel.try_send(( + proof_request.clone(), + prover_state.opts, + prover_state.chain_specs, + ))?; manager.enqueue_task(l1_chain_spec.chain_id, &proof_request)?; Ok(Json(serde_json::json!("{}"))) From 0c01e59da53e4af4892c0f01e662cb9a10003ca3 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 24 Jun 2024 12:03:13 +0200 Subject: [PATCH 32/44] [WIP]: add block fetching and initial blockhash getting for submit --- core/src/provider/mod.rs | 2 ++ core/src/provider/rpc.rs | 10 ++++++++++ host/src/server/api/v2/proof/submit.rs | 20 +++++++++++++++----- 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/core/src/provider/mod.rs b/core/src/provider/mod.rs index 7f9e974eb..e61f183d4 100644 --- a/core/src/provider/mod.rs +++ b/core/src/provider/mod.rs @@ -10,6 +10,8 @@ pub mod rpc; #[allow(async_fn_in_trait)] pub trait BlockDataProvider { + async fn get_block(&self, block_number: u64) -> RaikoResult; + async fn get_blocks(&self, blocks_to_fetch: &[(u64, bool)]) -> RaikoResult>; async fn get_accounts(&self, accounts: &[Address]) -> RaikoResult>; diff --git a/core/src/provider/rpc.rs b/core/src/provider/rpc.rs index 623d7f5c6..640dceca4 100644 --- a/core/src/provider/rpc.rs +++ b/core/src/provider/rpc.rs @@ -39,6 +39,16 @@ impl RpcBlockDataProvider { } impl BlockDataProvider for RpcBlockDataProvider { + async fn get_block(&self, block_number: u64) -> RaikoResult { + self.client + .request( + "eth_getBlockByNumber", + &(BlockNumberOrTag::from(block_number), true), + ) + .await + .map_err(|_| RaikoError::RPC("Failed calling eth_getBlockByNumber".to_owned())) + } + async fn get_blocks(&self, blocks_to_fetch: &[(u64, bool)]) -> RaikoResult> { let mut all_blocks = Vec::with_capacity(blocks_to_fetch.len()); diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof/submit.rs index eebe66de2..a7dc20261 100644 --- a/host/src/server/api/v2/proof/submit.rs +++ b/host/src/server/api/v2/proof/submit.rs @@ -1,5 +1,8 @@ use axum::{debug_handler, extract::State, routing::post, Json, Router}; -use raiko_core::interfaces::ProofRequest; +use raiko_core::{ + interfaces::ProofRequest, + provider::{rpc::RpcBlockDataProvider, BlockDataProvider}, +}; use raiko_task_manager::TaskDb; use serde_json::Value; use tracing::info; @@ -47,10 +50,17 @@ async fn submit_handler( proof_request.block_number, proof_request.network ); - let l1_chain_spec = prover_state + let taiko_chain_spec = prover_state .chain_specs - .get_chain_spec(&proof_request.l1_network.to_string()) - .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported l1 network".to_string()))?; + .get_chain_spec(&proof_request.network.to_string()) + .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported taiko network".to_string()))?; + + let provider = RpcBlockDataProvider::new( + &taiko_chain_spec.rpc.clone(), + proof_request.block_number - 1, + )?; + let block = provider.get_block(proof_request.block_number).await?; + let _blockhash = block.header.hash; let db = 
TaskDb::open_or_create(&prover_state.opts.sqlite_file)?; // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); @@ -62,7 +72,7 @@ async fn submit_handler( prover_state.chain_specs, ))?; - manager.enqueue_task(l1_chain_spec.chain_id, &proof_request)?; + manager.enqueue_task(taiko_chain_spec.chain_id, &proof_request)?; Ok(Json(serde_json::json!("{}"))) } From dc0d2c45eabb4c92fcc0260bcfc8b5077f2e0835 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 24 Jun 2024 15:28:17 +0200 Subject: [PATCH 33/44] [WIP]: handle task creation, status and proof retrieval --- core/src/provider/mod.rs | 31 +++++++- core/src/provider/rpc.rs | 10 --- host/src/lib.rs | 43 ++++------ .../api/v2/{proof/submit.rs => proof.rs} | 78 ++++++++++++------- host/src/server/api/v2/proof/get.rs | 43 ---------- host/src/server/api/v2/proof/mod.rs | 24 ------ host/src/server/api/v2/proof/status.rs | 43 ---------- task_manager/src/lib.rs | 52 +++++++------ task_manager/tests/main.rs | 68 +++++++++------- 9 files changed, 156 insertions(+), 236 deletions(-) rename host/src/server/api/v2/{proof/submit.rs => proof.rs} (54%) delete mode 100644 host/src/server/api/v2/proof/get.rs delete mode 100644 host/src/server/api/v2/proof/mod.rs delete mode 100644 host/src/server/api/v2/proof/status.rs diff --git a/core/src/provider/mod.rs b/core/src/provider/mod.rs index e61f183d4..9a57cb300 100644 --- a/core/src/provider/mod.rs +++ b/core/src/provider/mod.rs @@ -1,17 +1,20 @@ -use alloy_primitives::{Address, U256}; +use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types::Block; +use raiko_lib::consts::SupportedChainSpecs; use revm::primitives::AccountInfo; use std::collections::HashMap; -use crate::{interfaces::RaikoResult, MerkleProof}; +use crate::{ + interfaces::{RaikoError, RaikoResult}, + provider::rpc::RpcBlockDataProvider, + MerkleProof, +}; pub mod db; pub mod rpc; #[allow(async_fn_in_trait)] pub trait BlockDataProvider { - async fn get_block(&self, block_number: u64) -> RaikoResult; - async fn get_blocks(&self, blocks_to_fetch: &[(u64, bool)]) -> RaikoResult>; async fn get_accounts(&self, accounts: &[Address]) -> RaikoResult>; @@ -26,3 +29,23 @@ pub trait BlockDataProvider { num_storage_proofs: usize, ) -> RaikoResult; } + +pub async fn get_task_data( + network: &str, + block_number: u64, + chain_specs: &SupportedChainSpecs, +) -> RaikoResult<(u64, B256)> { + let taiko_chain_spec = chain_specs + .get_chain_spec(network) + .ok_or_else(|| RaikoError::InvalidRequestConfig("Unsupported raiko network".to_string()))?; + let provider = RpcBlockDataProvider::new(&taiko_chain_spec.rpc.clone(), block_number - 1)?; + let blocks = provider.get_blocks(&[(block_number, true)]).await?; + let block = blocks + .first() + .ok_or_else(|| RaikoError::RPC("No block for requested block number".to_string()))?; + let blockhash = block + .header + .hash + .ok_or_else(|| RaikoError::RPC("No block hash for requested block".to_string()))?; + Ok((taiko_chain_spec.chain_id, blockhash)) +} diff --git a/core/src/provider/rpc.rs b/core/src/provider/rpc.rs index 640dceca4..623d7f5c6 100644 --- a/core/src/provider/rpc.rs +++ b/core/src/provider/rpc.rs @@ -39,16 +39,6 @@ impl RpcBlockDataProvider { } impl BlockDataProvider for RpcBlockDataProvider { - async fn get_block(&self, block_number: u64) -> RaikoResult { - self.client - .request( - "eth_getBlockByNumber", - &(BlockNumberOrTag::from(block_number), true), - ) - .await - .map_err(|_| RaikoError::RPC("Failed calling eth_getBlockByNumber".to_owned())) - } - async fn 
get_blocks(&self, blocks_to_fetch: &[(u64, bool)]) -> RaikoResult> { let mut all_blocks = Vec::with_capacity(blocks_to_fetch.len()); diff --git a/host/src/lib.rs b/host/src/lib.rs index a21d7819d..50ac284c3 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -19,7 +19,7 @@ use clap::Parser; use raiko_core::{ interfaces::{ProofRequest, ProofRequestOpt, RaikoError}, merge, - provider::rpc::RpcBlockDataProvider, + provider::{get_task_data, rpc::RpcBlockDataProvider}, Raiko, }; use raiko_lib::{consts::SupportedChainSpecs, Measurement}; @@ -177,22 +177,22 @@ impl ProverState { let proof_request_clone = proof_request.clone(); let opts_clone = opts.clone(); let chain_specs_clone = chain_specs.clone(); + let (chain_id, blockhash) = get_task_data( + &proof_request.network, + proof_request.block_number, + &chain_specs, + ) + .await + .unwrap(); let proof_result: HostResult = async move { { let db = TaskDb::open_or_create(&opts_clone.sqlite_file)?; // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); let mut manager = db.manage()?; - let taiko_chain_spec = chain_specs_clone - .get_chain_spec(&proof_request_clone.network.to_string()) - .ok_or_else(|| { - HostError::InvalidRequestConfig( - "Unsupported raiko network".to_string(), - ) - })?; manager.update_task_progress( - taiko_chain_spec.chain_id, - proof_request.block_number, + chain_id, + blockhash, proof_request.proof_type, TaskStatus::WorkInProgress, None, @@ -207,18 +207,11 @@ impl ProverState { let db = TaskDb::open_or_create(&opts.sqlite_file)?; // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); let mut manager = db.manage()?; - let taiko_chain_spec = chain_specs - .get_chain_spec(&proof_request.network.to_string()) - .ok_or_else(|| { - HostError::InvalidRequestConfig( - "Unsupported raiko network".to_string(), - ) - })?; let proof = proof.proof.unwrap(); let proof = proof.as_bytes(); manager.update_task_progress( - taiko_chain_spec.chain_id, - proof_request.block_number, + chain_id, + blockhash, proof_request.proof_type, TaskStatus::WorkInProgress, Some(proof), @@ -232,17 +225,9 @@ impl ProverState { let db = TaskDb::open_or_create(&opts.sqlite_file)?; // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); let mut manager = db.manage()?; - let taiko_chain_spec = chain_specs - .get_chain_spec(&proof_request.network.to_string()) - .ok_or_else(|| { - HostError::InvalidRequestConfig( - "Unsupported raiko network".to_string(), - ) - })?; - manager.update_task_progress( - taiko_chain_spec.chain_id, - proof_request.block_number, + chain_id, + blockhash, proof_request.proof_type, match error { HostError::HandleDropped diff --git a/host/src/server/api/v2/proof/submit.rs b/host/src/server/api/v2/proof.rs similarity index 54% rename from host/src/server/api/v2/proof/submit.rs rename to host/src/server/api/v2/proof.rs index a7dc20261..47678c919 100644 --- a/host/src/server/api/v2/proof/submit.rs +++ b/host/src/server/api/v2/proof.rs @@ -1,20 +1,18 @@ use axum::{debug_handler, extract::State, routing::post, Json, Router}; -use raiko_core::{ - interfaces::ProofRequest, - provider::{rpc::RpcBlockDataProvider, BlockDataProvider}, -}; -use raiko_task_manager::TaskDb; +use raiko_core::{interfaces::ProofRequest, provider::get_task_data}; +use raiko_task_manager::{TaskDb, TaskStatus}; use serde_json::Value; use tracing::info; use utoipa::OpenApi; use crate::{ - interfaces::{HostError, HostResult}, + interfaces::HostResult, metrics::{inc_current_req, inc_guest_req_count, 
inc_host_req_count}, + server::api::v1::ProofResponse, ProverState, }; -#[utoipa::path(post, path = "/proof/submit", +#[utoipa::path(post, path = "/proof", tag = "Proving", request_body = ProofRequestOpt, responses ( @@ -22,7 +20,7 @@ use crate::{ ) )] #[debug_handler(state = ProverState)] -/// Submit a proof task with requested config. +/// Submit a proof task with requested config, get task status or get proof value. /// /// Accepts a proof request and creates a proving task with the specified guest prover. /// The guest provers currently available are: @@ -30,7 +28,7 @@ use crate::{ /// - sgx - uses the sgx environment to construct a block and produce proof of execution /// - sp1 - uses the sp1 prover /// - risc0 - uses the risc0 prover -async fn submit_handler( +async fn proof_handler( State(prover_state): State, Json(req): Json, ) -> HostResult> { @@ -50,34 +48,56 @@ async fn submit_handler( proof_request.block_number, proof_request.network ); - let taiko_chain_spec = prover_state - .chain_specs - .get_chain_spec(&proof_request.network.to_string()) - .ok_or_else(|| HostError::InvalidRequestConfig("Unsupported taiko network".to_string()))?; - - let provider = RpcBlockDataProvider::new( - &taiko_chain_spec.rpc.clone(), - proof_request.block_number - 1, - )?; - let block = provider.get_block(proof_request.block_number).await?; - let _blockhash = block.header.hash; + let (chain_id, block_hash) = get_task_data( + &proof_request.network, + proof_request.block_number, + &prover_state.chain_specs, + ) + .await?; let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?; // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); let mut manager = db.manage()?; - prover_state.task_channel.try_send(( - proof_request.clone(), - prover_state.opts, - prover_state.chain_specs, - ))?; + let status = manager.get_task_proving_status(chain_id, block_hash, proof_request.proof_type)?; + + if status.is_empty() { + prover_state.task_channel.try_send(( + proof_request.clone(), + prover_state.opts, + prover_state.chain_specs, + ))?; + + manager.enqueue_task(chain_id, block_hash, &proof_request)?; + return Ok(Json(serde_json::json!("{}"))); + } + + let (status, _) = status.first().unwrap(); + + if matches!(status, TaskStatus::Success) { + let proof = manager.get_task_proof(chain_id, block_hash, proof_request.proof_type)?; + + let response = ProofResponse { + proof: Some(String::from_utf8(proof).unwrap()), + output: None, + quote: None, + }; + + return Ok(Json(serde_json::to_value(response)?)); + } - manager.enqueue_task(taiko_chain_spec.chain_id, &proof_request)?; - Ok(Json(serde_json::json!("{}"))) + Ok(Json(serde_json::json!( + { + "status": "ok", + "data": { + "status": status + } + } + ))) } #[derive(OpenApi)] -#[openapi(paths(submit_handler))] +#[openapi(paths(proof_handler))] struct Docs; pub fn create_docs() -> utoipa::openapi::OpenApi { @@ -85,5 +105,5 @@ pub fn create_docs() -> utoipa::openapi::OpenApi { } pub fn create_router() -> Router { - Router::new().route("/submit", post(submit_handler)) + Router::new().route("/proof", post(proof_handler)) } diff --git a/host/src/server/api/v2/proof/get.rs b/host/src/server/api/v2/proof/get.rs deleted file mode 100644 index a377d5933..000000000 --- a/host/src/server/api/v2/proof/get.rs +++ /dev/null @@ -1,43 +0,0 @@ -use axum::{ - debug_handler, - extract::{Path, State}, - routing::get, - Json, Router, -}; -use raiko_task_manager::TaskDb; -use utoipa::OpenApi; - -use crate::{interfaces::HostResult, ProverState}; - 
-#[utoipa::path(get, path = "/proof/:task_id", - tag = "Proving", - request_body = ProofRequestOpt, - responses ( - (status = 200, description = "Successfully retrieved a proof", body = Status) - ) -)] -#[debug_handler(state = ProverState)] -/// Get proof for given task id. -/// -/// Accepts a proving task id. -async fn get_handler( - State(prover_state): State, - Path(task_id): Path, -) -> HostResult>> { - let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?; - let mut manager = db.manage()?; - let status = manager.get_task_proof_by_id(task_id)?; - Ok(Json(status)) -} - -#[derive(OpenApi)] -#[openapi(paths(get_handler))] -struct Docs; - -pub fn create_docs() -> utoipa::openapi::OpenApi { - Docs::openapi() -} - -pub fn create_router() -> Router { - Router::new().route("/:task_id", get(get_handler)) -} diff --git a/host/src/server/api/v2/proof/mod.rs b/host/src/server/api/v2/proof/mod.rs deleted file mode 100644 index 4121a4199..000000000 --- a/host/src/server/api/v2/proof/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -use axum::Router; -use utoipa::openapi; - -use crate::ProverState; - -mod get; -mod status; -mod submit; - -pub fn create_docs() -> openapi::OpenApi { - [status::create_docs(), submit::create_docs()] - .into_iter() - .fold(get::create_docs(), |mut doc, sub_doc| { - doc.merge(sub_doc); - doc - }) -} - -pub fn create_router() -> Router { - Router::new() - .merge(get::create_router()) - .merge(status::create_router()) - .merge(submit::create_router()) -} diff --git a/host/src/server/api/v2/proof/status.rs b/host/src/server/api/v2/proof/status.rs deleted file mode 100644 index 4ddab7012..000000000 --- a/host/src/server/api/v2/proof/status.rs +++ /dev/null @@ -1,43 +0,0 @@ -use axum::{ - debug_handler, - extract::{Path, State}, - routing::get, - Json, Router, -}; -use raiko_task_manager::{TaskDb, TaskProvingStatus}; -use utoipa::OpenApi; - -use crate::{interfaces::HostResult, ProverState}; - -#[utoipa::path(get, path = "/proof/status/:task_id", - tag = "Proving", - request_body = ProofRequestOpt, - responses ( - (status = 200, description = "Successfully retrieved proving task status", body = Status) - ) -)] -#[debug_handler(state = ProverState)] -/// Check for a proving task status. -/// -/// Accepts a proving task id. 
-async fn status_handler( - State(prover_state): State, - Path(task_id): Path, -) -> HostResult> { - let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?; - let mut manager = db.manage()?; - let status = manager.get_task_proving_status_by_id(task_id)?; - Ok(Json(status)) -} - -#[derive(OpenApi)] -#[openapi(paths(status_handler))] -struct Docs; - -pub fn create_docs() -> utoipa::openapi::OpenApi { - Docs::openapi() -} - -pub fn create_router() -> Router { - Router::new().route("/status/:task_id", get(status_handler)) -} diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 78d981921..86d1b2b38 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -161,7 +161,7 @@ use std::{ use chrono::{DateTime, Utc}; use num_enum::{FromPrimitive, IntoPrimitive}; use raiko_core::interfaces::{ProofRequest, ProofType}; -use raiko_lib::primitives::{BlockNumber, ChainId, B256}; +use raiko_lib::primitives::{ChainId, B256}; use rusqlite::{ Error as SqlError, {named_params, Statement}, {Connection, OpenFlags}, }; @@ -368,11 +368,11 @@ impl TaskDb { CREATE TABLE tasks( id INTEGER UNIQUE NOT NULL PRIMARY KEY, chain_id INTEGER NOT NULL, - block_number INTEGER NOT NULL, + blockhash BLOB NOT NULL, proofsys_id INTEGER NOT NULL, request BLOB, FOREIGN KEY(proofsys_id) REFERENCES proofsys(id), - UNIQUE (chain_id, block_number, proofsys_id) + UNIQUE (chain_id, blockhash, proofsys_id) ); -- Proofs might also be large, so we isolate them in a dedicated table @@ -404,7 +404,7 @@ impl TaskDb { SELECT t.id, t.chain_id, - t.block_number, + t.blockhash, t.proofsys_id, t.request FROM @@ -415,7 +415,7 @@ impl TaskDb { SELECT t.id, t.chain_id, - t.block_number, + t.blockhash, t.proofsys_id, ts.status_id, tpf.proof @@ -480,11 +480,11 @@ impl TaskDb { ON enqueue_task BEGIN INSERT INTO - tasks(chain_id, block_number, proofsys_id, request) + tasks(chain_id, blockhash, proofsys_id, request) VALUES ( new.chain_id, - new.block_number, + new.blockhash, new.proofsys_id, new.request ); @@ -526,7 +526,7 @@ impl TaskDb { tasks WHERE chain_id = new.chain_id - AND block_number = new.block_number + AND blockhash = new.blockhash AND proofsys_id = new.proofsys_id LIMIT 1; @@ -565,14 +565,14 @@ impl TaskDb { INSERT INTO enqueue_task( chain_id, - block_number, + blockhash, proofsys_id, request ) VALUES ( :chain_id, - :block_number, + :blockhash, :proofsys_id, :request ); @@ -584,7 +584,7 @@ impl TaskDb { INSERT INTO update_task_progress( chain_id, - block_number, + blockhash, proofsys_id, status_id, proof @@ -592,7 +592,7 @@ impl TaskDb { VALUES ( :chain_id, - :block_number, + :blockhash, :proofsys_id, :status_id, :proof @@ -628,7 +628,7 @@ impl TaskDb { LEFT JOIN tasks t ON tp.task_id = t.id WHERE t.chain_id = :chain_id - AND t.block_number = :block_number + AND t.blockhash = :blockhash AND t.proofsys_id = :proofsys_id LIMIT 1; @@ -659,7 +659,7 @@ impl TaskDb { LEFT JOIN tasks t ON ts.task_id = t.id WHERE t.chain_id = :chain_id - AND t.block_number = :block_number + AND t.blockhash = :blockhash AND t.proofsys_id = :proofsys_id ORDER BY ts.timestamp DESC; @@ -685,7 +685,7 @@ impl TaskDb { " SELECT t.chain_id, - t.block_number, + t.blockhash, t.proofsys_id, ts.status_id, timestamp @@ -724,7 +724,6 @@ pub struct EnqueueTaskParams { pub blockhash: B256, pub proof_system: TaskProofsys, pub submitter: String, - pub block_number: BlockNumber, pub parent_hash: B256, pub state_root: B256, pub num_transactions: u64, @@ -735,10 +734,15 @@ pub struct EnqueueTaskParams { pub type TaskProvingStatus = Vec<(TaskStatus, 
DateTime)>; impl<'db> TaskManager<'db> { - pub fn enqueue_task(&mut self, chain_id: u64, request: &ProofRequest) -> TaskManagerResult<()> { + pub fn enqueue_task( + &mut self, + chain_id: u64, + blockhash: B256, + request: &ProofRequest, + ) -> TaskManagerResult<()> { self.enqueue_task.execute(named_params! { ":chain_id": chain_id, - ":block_number": request.block_number, + ":blockhash": blockhash.to_vec(), ":proofsys_id": TaskProofsys::from(request.proof_type) as u8, ":request": serde_json::to_vec(&request)?, })?; @@ -749,14 +753,14 @@ impl<'db> TaskManager<'db> { pub fn update_task_progress( &mut self, chain_id: ChainId, - block_number: u64, + blockhash: B256, proof_type: ProofType, status: TaskStatus, proof: Option<&[u8]>, ) -> TaskManagerResult<()> { self.update_task_progress.execute(named_params! { ":chain_id": chain_id, - ":block_number": block_number, + ":blockhash": blockhash.to_vec(), ":proofsys_id": TaskProofsys::from(proof_type) as u8, ":status_id": status as i32, ":proof": proof @@ -768,13 +772,13 @@ impl<'db> TaskManager<'db> { pub fn get_task_proving_status( &mut self, chain_id: ChainId, - block_number: u64, + blockhash: B256, proof_type: ProofType, ) -> TaskManagerResult { let rows = self.get_task_proving_status.query_map( named_params! { ":chain_id": chain_id, - ":block_number": block_number, + ":blockhash": blockhash.to_vec(), ":proofsys_id": TaskProofsys::from(proof_type) as u8, }, |row| { @@ -812,13 +816,13 @@ impl<'db> TaskManager<'db> { pub fn get_task_proof( &mut self, chain_id: ChainId, - block_number: u64, + blockhash: B256, proof_type: ProofType, ) -> TaskManagerResult> { let proof = self.get_task_proof.query_row( named_params! { ":chain_id": chain_id, - ":block_number": block_number, + ":blockhash": blockhash.to_vec(), ":proofsys_id": TaskProofsys::from(proof_type) as u8, }, |r| r.get(0), diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 31b61eeaf..e4a8144e7 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -17,7 +17,7 @@ mod tests { use raiko_lib::primitives::B256; use raiko_task_manager::{TaskDb, TaskStatus}; - fn create_random_task(rng: &mut ChaCha8Rng) -> (u64, ProofRequest) { + fn create_random_task(rng: &mut ChaCha8Rng) -> (u64, B256, ProofRequest) { let chain_id = 100; let proof_type = match rng.gen_range(0..4) { 0 => ProofType::Native, @@ -26,12 +26,14 @@ mod tests { _ => ProofType::Risc0, }; let block_number = rng.gen_range(1..4_000_000); + let block_hash = B256::random(); let graffiti = B256::random(); let prover_args = HashMap::new(); let prover = Address::random(); ( chain_id, + block_hash, ProofRequest { block_number, network: "network".to_string(), @@ -63,8 +65,9 @@ mod tests { // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); let mut tama = db.manage().unwrap(); - let (chain_id, request) = create_random_task(&mut ChaCha8Rng::seed_from_u64(123)); - tama.enqueue_task(chain_id, &request).unwrap(); + let (chain_id, block_hash, request) = + create_random_task(&mut ChaCha8Rng::seed_from_u64(123)); + tama.enqueue_task(chain_id, block_hash, &request).unwrap(); } #[test] @@ -92,12 +95,12 @@ mod tests { let mut tasks = vec![]; for _ in 0..5 { - let (chain_id, request) = create_random_task(&mut rng); + let (chain_id, block_hash, request) = create_random_task(&mut rng); - tama.enqueue_task(chain_id, &request).unwrap(); + tama.enqueue_task(chain_id, block_hash, &request).unwrap(); let task_status = tama - .get_task_proving_status(chain_id, request.block_number, request.proof_type) + 
.get_task_proving_status(chain_id, block_hash, request.proof_type) .unwrap(); assert_eq!(task_status.len(), 1); let (status, _) = task_status @@ -105,27 +108,32 @@ mod tests { .expect("Already confirmed there is exactly 1 element"); assert_eq!(status, &TaskStatus::Registered); - tasks.push((chain_id, request.block_number, request.proof_type)); + tasks.push(( + chain_id, + block_hash, + request.block_number, + request.proof_type, + )); } std::thread::sleep(Duration::from_millis(1)); { let task_status = tama - .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].2) + .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].3) .unwrap(); println!("{task_status:?}"); tama.update_task_progress( tasks[0].0, tasks[0].1, - tasks[0].2, + tasks[0].3, TaskStatus::Cancelled_NeverStarted, None, ) .unwrap(); let task_status = tama - .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].2) + .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].3) .unwrap(); println!("{task_status:?}"); assert_eq!(task_status.len(), 2); @@ -137,7 +145,7 @@ mod tests { tama.update_task_progress( tasks[1].0, tasks[1].1, - tasks[1].2, + tasks[1].3, TaskStatus::WorkInProgress, None, ) @@ -145,7 +153,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].2) + .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].3) .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); @@ -157,7 +165,7 @@ mod tests { tama.update_task_progress( tasks[1].0, tasks[1].1, - tasks[1].2, + tasks[1].3, TaskStatus::CancellationInProgress, None, ) @@ -165,7 +173,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].2) + .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].3) .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[0].0, TaskStatus::CancellationInProgress); @@ -178,7 +186,7 @@ mod tests { tama.update_task_progress( tasks[1].0, tasks[1].1, - tasks[1].2, + tasks[1].3, TaskStatus::Cancelled, None, ) @@ -186,7 +194,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].2) + .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].3) .unwrap(); assert_eq!(task_status.len(), 4); assert_eq!(task_status[0].0, TaskStatus::Cancelled); @@ -201,7 +209,7 @@ mod tests { tama.update_task_progress( tasks[2].0, tasks[2].1, - tasks[2].2, + tasks[2].3, TaskStatus::WorkInProgress, None, ) @@ -209,7 +217,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].2) + .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].3) .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); @@ -222,7 +230,7 @@ mod tests { tama.update_task_progress( tasks[2].0, tasks[2].1, - tasks[2].2, + tasks[2].3, TaskStatus::Success, Some(&proof), ) @@ -230,7 +238,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].2) + .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].3) .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[0].0, TaskStatus::Success); @@ -240,7 +248,7 @@ mod tests { assert_eq!( proof, - tama.get_task_proof(tasks[2].0, tasks[2].1, tasks[2].2) + tama.get_task_proof(tasks[2].0, tasks[2].1, tasks[2].3) .unwrap() ); } @@ -250,7 +258,7 @@ mod tests { tama.update_task_progress( tasks[3].0, tasks[3].1, - tasks[3].2, + tasks[3].3, TaskStatus::WorkInProgress, None, ) @@ -258,7 +266,7 @@ mod 
tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); @@ -270,7 +278,7 @@ mod tests { tama.update_task_progress( tasks[3].0, tasks[3].1, - tasks[3].2, + tasks[3].3, TaskStatus::NetworkFailure, None, ) @@ -278,7 +286,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[0].0, TaskStatus::NetworkFailure); @@ -291,7 +299,7 @@ mod tests { tama.update_task_progress( tasks[3].0, tasks[3].1, - tasks[3].2, + tasks[3].3, TaskStatus::WorkInProgress, None, ) @@ -299,7 +307,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) .unwrap(); assert_eq!(task_status.len(), 4); assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); @@ -314,7 +322,7 @@ mod tests { tama.update_task_progress( tasks[3].0, tasks[3].1, - tasks[3].2, + tasks[3].3, TaskStatus::Success, Some(&proof), ) @@ -322,7 +330,7 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].2) + .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) .unwrap(); assert_eq!(task_status.len(), 5); assert_eq!(task_status[0].0, TaskStatus::Success); @@ -334,7 +342,7 @@ mod tests { assert_eq!( proof, - tama.get_task_proof(tasks[3].0, tasks[3].1, tasks[3].2) + tama.get_task_proof(tasks[3].0, tasks[3].1, tasks[3].3) .unwrap() ); } From 7f9188735e68968894640d1886d7292457c901ba Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Mon, 24 Jun 2024 15:47:11 +0200 Subject: [PATCH 34/44] fix(host): fix route sub-path --- host/src/server/api/v2/proof.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/host/src/server/api/v2/proof.rs b/host/src/server/api/v2/proof.rs index 47678c919..5b07e8b0f 100644 --- a/host/src/server/api/v2/proof.rs +++ b/host/src/server/api/v2/proof.rs @@ -105,5 +105,5 @@ pub fn create_docs() -> utoipa::openapi::OpenApi { } pub fn create_router() -> Router { - Router::new().route("/proof", post(proof_handler)) + Router::new().route("/", post(proof_handler)) } From ca1c7b028a9a835364390762cfee78453e401e57 Mon Sep 17 00:00:00 2001 From: smtmfft <99081233+smtmfft@users.noreply.github.com> Date: Thu, 27 Jun 2024 17:18:57 +0800 Subject: [PATCH 35/44] feat(raiko): abstract task manager and impl a mem db for easy integration (#296) * impl a mem db for easy integration Signed-off-by: smtmfft * fix clippy and unit test Signed-off-by: smtmfft * fix fmt Signed-off-by: smtmfft --------- Signed-off-by: smtmfft --- Cargo.lock | 2 + core/src/interfaces.rs | 2 + host/src/lib.rs | 28 +- host/src/server/api/v2/proof.rs | 42 +- task_manager/Cargo.toml | 2 + task_manager/src/adv_sqlite.rs | 888 +++++++++++++++++++++++++++++++ task_manager/src/lib.rs | 912 +++++--------------------------- task_manager/src/mem_db.rs | 311 +++++++++++ task_manager/tests/main.rs | 209 ++++++-- 9 files changed, 1532 insertions(+), 864 deletions(-) create mode 100644 task_manager/src/adv_sqlite.rs create mode 100644 task_manager/src/mem_db.rs diff --git a/Cargo.lock b/Cargo.lock index 153ee3e3d..767c368bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5896,6 +5896,7 @@ version = "0.1.0" dependencies = [ "alloy-primitives", 
"chrono", + "hex", "num_enum 0.7.2", "raiko-core", "raiko-lib", @@ -5906,6 +5907,7 @@ dependencies = [ "serde_json", "tempfile", "thiserror", + "tracing", ] [[package]] diff --git a/core/src/interfaces.rs b/core/src/interfaces.rs index 795a0d7fb..00574217f 100644 --- a/core/src/interfaces.rs +++ b/core/src/interfaces.rs @@ -79,6 +79,7 @@ pub type RaikoResult = Result; Ord, Clone, Debug, + Default, Deserialize, Serialize, ToSchema, @@ -88,6 +89,7 @@ pub type RaikoResult = Result; )] /// Available proof types. pub enum ProofType { + #[default] /// # Native /// /// This builds the block the same way the node does and then runs the result. diff --git a/host/src/lib.rs b/host/src/lib.rs index 218a19708..ac94ee2d9 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -14,7 +14,7 @@ use raiko_core::{ Raiko, }; use raiko_lib::{consts::SupportedChainSpecs, Measurement}; -use raiko_task_manager::{TaskDb, TaskStatus}; +use raiko_task_manager::{get_task_manager, TaskManager, TaskManagerOpts, TaskStatus}; use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::sync::mpsc; @@ -112,6 +112,9 @@ pub struct Cli { #[arg(long, require_equals = true, default_value = "raiko.sqlite")] /// Set the path to the sqlite db file sqlite_file: PathBuf, + + #[arg(long, require_equals = true, default_value = "1048576")] + max_db_size: usize, } impl Cli { @@ -171,16 +174,19 @@ impl ProverState { ) .await .unwrap(); - + let task_manager_opts = &TaskManagerOpts { + sqlite_file: opts.sqlite_file.clone(), + max_db_size: opts.max_db_size, + }; let proof_result: HostResult = async move { { - let db = TaskDb::open_or_create(&opts_clone.sqlite_file)?; - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut manager = db.manage()?; + let manager_binding = get_task_manager(task_manager_opts); + let mut manager = manager_binding.lock().unwrap(); manager.update_task_progress( chain_id, blockhash, proof_request.proof_type, + Some(proof_request.prover.to_string()), TaskStatus::WorkInProgress, None, )?; @@ -191,15 +197,15 @@ impl ProverState { match proof_result { Ok(proof) => { let _: HostResult<()> = async move { - let db = TaskDb::open_or_create(&opts.sqlite_file)?; - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut manager = db.manage()?; let proof = proof.proof.unwrap(); let proof = proof.as_bytes(); + let manager_binding = get_task_manager(task_manager_opts); + let mut manager = manager_binding.lock().unwrap(); manager.update_task_progress( chain_id, blockhash, proof_request.proof_type, + Some(proof_request.prover.to_string()), TaskStatus::WorkInProgress, Some(proof), )?; @@ -209,13 +215,13 @@ impl ProverState { } Err(error) => { let _: HostResult<()> = async move { - let db = TaskDb::open_or_create(&opts.sqlite_file)?; - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut manager = db.manage()?; + let manager_binding = get_task_manager(task_manager_opts); + let mut manager = manager_binding.lock().unwrap(); manager.update_task_progress( chain_id, blockhash, proof_request.proof_type, + Some(proof_request.prover.to_string()), match error { HostError::HandleDropped | HostError::CapacityFull diff --git a/host/src/server/api/v2/proof.rs b/host/src/server/api/v2/proof.rs index 5b07e8b0f..bd864c438 100644 --- a/host/src/server/api/v2/proof.rs +++ b/host/src/server/api/v2/proof.rs @@ -1,6 +1,9 @@ use axum::{debug_handler, extract::State, routing::post, Json, Router}; -use raiko_core::{interfaces::ProofRequest, 
provider::get_task_data}; -use raiko_task_manager::{TaskDb, TaskStatus}; +use raiko_core::interfaces::ProofRequest; +use raiko_core::provider::get_task_data; +use raiko_task_manager::{ + get_task_manager, EnqueueTaskParams, TaskManager, TaskManagerOpts, TaskStatus, +}; use serde_json::Value; use tracing::info; use utoipa::OpenApi; @@ -55,11 +58,17 @@ async fn proof_handler( ) .await?; - let db = TaskDb::open_or_create(&prover_state.opts.sqlite_file)?; - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut manager = db.manage()?; - - let status = manager.get_task_proving_status(chain_id, block_hash, proof_request.proof_type)?; + let manager_binding = get_task_manager(&TaskManagerOpts { + sqlite_file: prover_state.opts.sqlite_file.clone(), + max_db_size: prover_state.opts.max_db_size, + }); + let mut manager = manager_binding.lock().unwrap(); + let status = manager.get_task_proving_status( + chain_id, + block_hash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + )?; if status.is_empty() { prover_state.task_channel.try_send(( @@ -68,14 +77,25 @@ async fn proof_handler( prover_state.chain_specs, ))?; - manager.enqueue_task(chain_id, block_hash, &proof_request)?; + manager.enqueue_task(&EnqueueTaskParams { + chain_id, + blockhash: block_hash, + proof_system: proof_request.proof_type, + prover: proof_request.prover.to_string(), + block_number: proof_request.block_number, + })?; return Ok(Json(serde_json::json!("{}"))); } - let (status, _) = status.first().unwrap(); + let status = status.first().unwrap().0; if matches!(status, TaskStatus::Success) { - let proof = manager.get_task_proof(chain_id, block_hash, proof_request.proof_type)?; + let proof = manager.get_task_proof( + chain_id, + block_hash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + )?; let response = ProofResponse { proof: Some(String::from_utf8(proof).unwrap()), @@ -90,7 +110,7 @@ async fn proof_handler( { "status": "ok", "data": { - "status": status + "status": status, } } ))) diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index d0e7c50fc..9b411b776 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -13,6 +13,8 @@ chrono = { workspace = true, features = ["serde"] } thiserror = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +hex = { workspace = true } +tracing = { workspace = true } [dev-dependencies] rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::() diff --git a/task_manager/src/adv_sqlite.rs b/task_manager/src/adv_sqlite.rs new file mode 100644 index 000000000..672c7cec9 --- /dev/null +++ b/task_manager/src/adv_sqlite.rs @@ -0,0 +1,888 @@ +// Raiko +// Copyright (c) 2024 Taiko Labs +// Licensed and distributed under either of +// * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). +// * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). +// at your option. This file may not be copied, modified, or distributed except according to those terms. + +//! # Raiko Task Manager +//! +//! At the moment (Apr '24) proving requires a significant amount of time +//! and maintaining a connection with a potentially external party. +//! +//! By design Raiko is stateless, it prepares inputs and forward to the various proof systems. +//! However some proving backend like Risc0's Bonsai are also stateless, +//! and only accepts proofs and return result. +//! 
Hence to handle crashes, networking losses and restarts, we need to persist +//! the status of proof requests, task submitted, proof received, proof forwarded. +//! +//! In the diagram: +//! _____________ ______________ _______________ +//! Taiko L2 -> | Taiko-geth | ======> | Raiko-host | =========> | Raiko-guests | +//! | Taiko-reth | | | | Risc0 | +//! |____________| |_____________| | SGX | +//! | SP1 | +//! |______________| +//! _____________________________ +//! =========> | Prover Networks | +//! | Risc0's Bonsai | +//! | Succinct's Prover Network | +//! |____________________________| +//! _________________________ +//! =========> | Raiko-dist | +//! | Distributed Risc0 | +//! | Distributed SP1 | +//! |_______________________| +//! +//! We would position Raiko task manager either before Raiko-host or after Raiko-host. +//! +//! ## Implementation +//! +//! The task manager is a set of tables and KV-stores. +//! - Keys for table joins are prefixed with id +//! - KV-stores for (almost) immutable data +//! - KV-store for large inputs and indistinguishable from random proofs +//! - Tables for tasks and their metadata. +//! +//! __________________________ +//! | metadata | +//! |_________________________| A simple KV-store with the DB version for migration/upgrade detection. +//! | Key | Value | Future version may add new fields, without breaking older versions. +//! |_________________|_______| +//! | task_db_version | 0 | +//! |_________________|_______| +//! +//! ________________________ +//! | Proof systems | +//! |______________________| A map: ID -> proof systems +//! | id_proofsys | Desc | +//! |_____________|________| +//! | 0 | Risc0 | (0 for Risc0 and 1 for SP1 is intentional) +//! | 1 | SP1 | +//! | 2 | SGX | +//! |_____________|________| +//! +//! _________________________________________________ +//! | Task Status code | +//! |________________________________________________| +//! | id_status | Desc | +//! |_____________|__________________________________| +//! | 0 | Success | +//! | 1000 | Registered | +//! | 2000 | Work-in-progress | +//! | | | +//! | -1000 | Proof failure (prover - generic) | +//! | -1100 | Proof failure (OOM) | +//! | | | +//! | -2000 | Network failure | +//! | | | +//! | -3000 | Cancelled | +//! | -3100 | Cancelled (never started) | +//! | -3200 | Cancelled (aborted) | +//! | -3210 | Cancellation in progress | (Yes -3210 is intentional ;)) +//! | | | +//! | -4000 | Invalid or unsupported block | +//! | | | +//! | -9999 | Unspecified failure reason | +//! |_____________|__________________________________| +//! +//! Rationale: +//! - Convention, failures use negative status code. +//! - We leave space for new status codes +//! - -X000 status code are for generic failures segregated by failures: +//! on the networking side, the prover side or trying to prove an invalid block. +//! +//! A catchall -9999 error code is provided if a failure is not due to +//! either the network, the prover or the requester invalid block. +//! They should not exist in the DB and a proper analysis +//! and eventually status code should be assigned. +//! +//! ________________________________________________________________________________________________ +//! | Tasks metadata | +//! |________________________________________________________________________________________________| +//! | id_task | chain_id | block_number | blockhash | parent_hash | state_root | # of txs | gas_used | +//! 
|_________|__________|______________|___________|_____________|____________|__________|__________| +//! ____________________________________ +//! | Task queue | +//! |___________________________________| +//! | id_task | blockhash | id_proofsys | +//! |_________|___________|_____________| +//! ______________________________________ +//! | Task payloads | +//! |_____________________________________| +//! | id_task | inputs (serialized) | +//! |_________|___________________________| +//! _____________________________________ +//! | Task requests | +//! |____________________________________| +//! | id_task | id_submitter | timestamp | +//! |_________|______________|___________| +//! ___________________________________________________________________________________ +//! | Task progress trail | +//! |__________________________________________________________________________________| +//! | id_task | third_party | id_status | timestamp | +//! |_________|________________________|_________________________|_____________________| +//! | 101 | 'Based Proposer" | 1000 (Registered) | 2024-01-01 00:00:01 | +//! | 101 | 'A Prover Network' | 2000 (WIP) | 2024-01-01 00:00:01 | +//! | 101 | 'A Prover Network' | -2000 (Network failure) | 2024-01-01 00:02:00 | +//! | 101 | 'Proof in the Pudding' | 2000 (WIP) | 2024-01-01 00:02:30 | +//!·| 101 | 'Proof in the Pudding' | 0 (Success) | 2024-01-01 01:02:30 | +//! +//! Rationale: +//! - payloads are very large and warrant a dedicated table, with pruning +//! - metadata is useful to audit block building and prover efficiency +//! - Due to failures and retries, we may submit the same task to multiple fulfillers +//! or retry with the same fulfiller so we keep an audit trail of events. +//! +//! ____________________________ +//! | Proof cache | A map: ID -> proof +//! |___________________________| +//! | id_task | proof_value | +//! |__________|________________| A Groth16 proof is 2G₁+1G₂ elements +//! | 0 | 0xabcd...6789 | On BN254: 2*(2*32)+1*(2*2*32) = 256 bytes +//! | 1 | 0x1234...cdef | +//! | ... | ... | A SGX proof is ... +//! |__________|________________| A Stark proof (not wrapped in Groth16) would be several kilobytes +//! +//! Do we need pruning? +//! There are 60s * 60min * 24h * 30j = 2592000s in a month +//! dividing by 12, that's 216000 Ethereum slots. +//! Assuming 1kB of proofs per block (Stark-to-Groth16 Risc0 & SP1 + SGX, SGX size to be verified) +//! That's only 216MB per month. 
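
[The storage estimate in the comment above is easy to sanity-check. A minimal sketch, assuming 12-second Ethereum slots, 30-day months, and the ~1 kB-per-block proof size used there:

// Back-of-the-envelope check of the pruning estimate.
const SECONDS_PER_MONTH: u64 = 60 * 60 * 24 * 30; // 2_592_000
const SECONDS_PER_SLOT: u64 = 12;
const PROOF_BYTES_PER_BLOCK: u64 = 1_000; // ~1 kB: Groth16-wrapped Risc0/SP1 + SGX

fn main() {
    let slots_per_month = SECONDS_PER_MONTH / SECONDS_PER_SLOT; // 216_000
    let bytes_per_month = slots_per_month * PROOF_BYTES_PER_BLOCK; // 216_000_000
    println!("~{} MB of proofs per month", bytes_per_month / 1_000_000); // ~216 MB
}
]
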
+ +// Imports +// ---------------------------------------------------------------- +use std::{ + fs::File, + io::{Error as IOError, ErrorKind as IOErrorKind}, + path::Path, +}; + +use chrono::{DateTime, Utc}; +use num_enum::{FromPrimitive, IntoPrimitive}; +use raiko_core::interfaces::{ProofRequest, ProofType}; +use raiko_lib::primitives::{ChainId, B256}; +use rusqlite::{ + Error as SqlError, {named_params, Statement}, {Connection, OpenFlags}, +}; +use serde::Serialize; + +// Types +// ---------------------------------------------------------------- + +#[derive(Debug, thiserror::Error)] +pub enum TaskManagerError { + #[error("IO Error {0}")] + IOError(IOErrorKind), + #[error("SQL Error {0}")] + SqlError(String), + #[error("Serde Error {0}")] + SerdeError(#[from] serde_json::error::Error), +} + +pub type TaskManagerResult = Result; + +impl From for TaskManagerError { + fn from(error: IOError) -> TaskManagerError { + TaskManagerError::IOError(error.kind()) + } +} + +impl From for TaskManagerError { + fn from(error: SqlError) -> TaskManagerError { + TaskManagerError::SqlError(error.to_string()) + } +} + +#[derive(Debug)] +pub struct TaskDb { + conn: Connection, +} + +#[derive(Debug)] +pub struct TaskManager<'db> { + enqueue_task: Statement<'db>, + update_task_progress: Statement<'db>, + get_task_proof: Statement<'db>, + get_task_proof_by_id: Statement<'db>, + get_task_proving_status: Statement<'db>, + get_task_proving_status_by_id: Statement<'db>, + #[allow(dead_code)] + get_tasks_unfinished: Statement<'db>, + get_db_size: Statement<'db>, +} + +#[derive(Debug, Copy, Clone)] +pub enum TaskProofsys { + Native = 0, + Risc0 = 1, + SP1 = 2, + SGX = 3, +} + +impl From for TaskProofsys { + fn from(value: ProofType) -> Self { + match value { + ProofType::Sp1 => Self::SP1, + ProofType::Sgx => Self::SGX, + ProofType::Risc0 => Self::Risc0, + ProofType::Native => Self::Native, + } + } +} + +impl From for ProofType { + fn from(val: TaskProofsys) -> Self { + match val { + TaskProofsys::Native => ProofType::Native, + TaskProofsys::Risc0 => ProofType::Risc0, + TaskProofsys::SP1 => ProofType::Sp1, + TaskProofsys::SGX => ProofType::Sgx, + } + } +} + +#[allow(non_camel_case_types)] +#[rustfmt::skip] +#[derive(PartialEq, Debug, Copy, Clone, IntoPrimitive, FromPrimitive, Serialize)] +#[repr(i32)] +pub enum TaskStatus { + Success = 0, + Registered = 1000, + WorkInProgress = 2000, + ProofFailure_Generic = -1000, + ProofFailure_OutOfMemory = -1100, + NetworkFailure = -2000, + Cancelled = -3000, + Cancelled_NeverStarted = -3100, + Cancelled_Aborted = -3200, + CancellationInProgress = -3210, + InvalidOrUnsupportedBlock = -4000, + UnspecifiedFailureReason = -9999, + #[num_enum(default)] + SqlDbCorruption = -99999, +} + +// Implementation +// ---------------------------------------------------------------- + +impl TaskDb { + fn open(path: &Path) -> TaskManagerResult { + let conn = Connection::open_with_flags(path, OpenFlags::SQLITE_OPEN_READ_WRITE)?; + conn.pragma_update(None, "foreign_keys", true)?; + conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?; + conn.pragma_update(None, "journal_mode", "WAL")?; + conn.pragma_update(None, "synchronous", "NORMAL")?; + conn.pragma_update(None, "temp_store", "MEMORY")?; + Ok(conn) + } + + fn create(path: &Path) -> TaskManagerResult { + let _file = File::options() + .write(true) + .read(true) + .create_new(true) + .open(path)?; + + let conn = Self::open(path)?; + Self::create_tables(&conn)?; + Self::create_views(&conn)?; + + Ok(conn) + } + + /// Open an existing TaskDb database 
at "path" + /// If a database does not exist at the path, one is created. + pub fn open_or_create(path: &Path) -> TaskManagerResult { + let conn = if path.exists() { + Self::open(path) + } else { + Self::create(path) + }?; + Ok(Self { conn }) + } + + // SQL + // ---------------------------------------------------------------- + + fn create_tables(conn: &Connection) -> TaskManagerResult<()> { + // Change the task_db_version if backward compatibility is broken + // and introduce a migration on DB opening ... if conserving history is important. + conn.execute_batch( + r#" + -- Metadata and mappings + ----------------------------------------------- + CREATE TABLE metadata( + key BLOB UNIQUE NOT NULL PRIMARY KEY, + value BLOB + ); + + INSERT INTO + metadata(key, value) + VALUES + ('task_db_version', 0); + + CREATE TABLE proofsys( + id INTEGER UNIQUE NOT NULL PRIMARY KEY, + desc TEXT NOT NULL + ); + + INSERT INTO + proofsys(id, desc) + VALUES + (0, 'Native'), + (1, 'Risc0'), + (2, 'SP1'), + (3, 'SGX'); + + CREATE TABLE status_codes( + id INTEGER UNIQUE NOT NULL PRIMARY KEY, + desc TEXT NOT NULL + ); + + INSERT INTO + status_codes(id, desc) + VALUES + (0, 'Success'), + (1000, 'Registered'), + (2000, 'Work-in-progress'), + (-1000, 'Proof failure (generic)'), + (-1100, 'Proof failure (Out-Of-Memory)'), + (-2000, 'Network failure'), + (-3000, 'Cancelled'), + (-3100, 'Cancelled (never started)'), + (-3200, 'Cancelled (aborted)'), + (-3210, 'Cancellation in progress'), + (-4000, 'Invalid or unsupported block'), + (-9999, 'Unspecified failure reason'); + + -- Data + ----------------------------------------------- + -- Notes: + -- 1. a blockhash may appear as many times as there are prover backends. + -- 2. For query speed over (chain_id, blockhash) + -- there is no need to create an index as the UNIQUE constraint + -- has an implied index, see: + -- - https://sqlite.org/lang_createtable.html#uniqueconst + -- - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices + CREATE TABLE tasks( + id INTEGER UNIQUE NOT NULL PRIMARY KEY, + chain_id INTEGER NOT NULL, + blockhash BLOB NOT NULL, + proofsys_id INTEGER NOT NULL, + request BLOB, + FOREIGN KEY(proofsys_id) REFERENCES proofsys(id), + UNIQUE (chain_id, blockhash, proofsys_id) + ); + + -- Proofs might also be large, so we isolate them in a dedicated table + CREATE TABLE task_proofs( + task_id INTEGER UNIQUE NOT NULL PRIMARY KEY, + proof BLOB NOT NULL, + FOREIGN KEY(task_id) REFERENCES tasks(id) + ); + + CREATE TABLE task_status( + task_id INTEGER NOT NULL, + status_id INTEGER NOT NULL, + timestamp TIMESTAMP DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) NOT NULL, + FOREIGN KEY(task_id) REFERENCES tasks(id), + FOREIGN KEY(status_id) REFERENCES status_codes(id), + UNIQUE (task_id, timestamp) + ); + "#, + )?; + + Ok(()) + } + + fn create_views(conn: &Connection) -> TaskManagerResult<()> { + // By convention, views will use an action verb as name. 
+ conn.execute_batch( + r#" + CREATE VIEW enqueue_task AS + SELECT + t.id, + t.chain_id, + t.blockhash, + t.proofsys_id, + t.request + FROM + tasks t + LEFT JOIN task_status ts on ts.task_id = t.id; + + CREATE VIEW update_task_progress AS + SELECT + t.id, + t.chain_id, + t.blockhash, + t.proofsys_id, + ts.status_id, + tpf.proof + FROM + tasks t + LEFT JOIN task_status ts on ts.task_id = t.id + LEFT JOIN task_proofs tpf on tpf.task_id = t.id; + "#, + )?; + + Ok(()) + } + + /// Set a tracer to debug SQL execution + /// for example: + /// db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); + #[cfg(test)] + pub fn set_tracer(&mut self, trace_fn: Option) { + self.conn.trace(trace_fn); + } + + pub fn manage(&self) -> TaskManagerResult> { + // To update all the tables with the task_id assigned by Sqlite + // we require row IDs for the tasks table + // and we use last_insert_rowid() which is not reentrant and need a transaction lock + // and store them in a temporary table, configured to be in-memory. + // + // Alternative approaches considered: + // 1. Sqlite does not support variables (because it's embedded and significantly less overhead than other SQL "Client-Server" DBs). + // 2. using AUTOINCREMENT and/or the sqlite_sequence table + // - sqlite recommends not using AUTOINCREMENT for performance + // https://www.sqlite.org/autoinc.html + // 3. INSERT INTO ... RETURNING nested in a WITH clause (CTE / Common Table Expression) + // - Sqlite can only do RETURNING to the application, it cannot be nested in another query or diverted to another table + // https://sqlite.org/lang_returning.html#limitations_and_caveats + // 4. CREATE TEMPORARY TABLE AS with an INSERT INTO ... RETURNING nested + // - Same limitation AND CREATE TABLEAS seems to only support SELECT statements (but if we could nest RETURNING we can workaround that + // https://www.sqlite.org/lang_createtable.html#create_table_as_select_statements + // + // Hence we have to use row IDs and last_insert_rowid() + // + // Furthermore we use a view and an INSTEAD OF trigger to update the tables, + // the alternative being + // + // 5. Direct insert into tables + // This does not work as SQLite `execute` and `prepare` + // only process the first statement. + // + // And lastly, we need the view and trigger to be temporary because + // otherwise they can't access the temporary table: + // 6. https://sqlite.org/forum/info/4f998eeec510bceee69404541e5c9ca0a301868d59ec7c3486ecb8084309bba1 + // "Triggers in any schema other than temp may only access objects in their own schema. However, triggers in temp may access any object by name, even cross-schema." 
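
[The temp-table-plus-trigger machinery motivated above is easier to see on a toy schema. A minimal, self-contained sketch of the same pattern (toy table names, not the patch's schema): one logical insert into a view fans out to two tables, carrying last_insert_rowid() through a temporary table, exactly as the enqueue_task trigger below does.

use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        r#"
        CREATE TABLE items(id INTEGER PRIMARY KEY, name TEXT NOT NULL);
        CREATE TABLE item_status(item_id INTEGER NOT NULL, status_id INTEGER NOT NULL);

        -- The view is the "virtual" insert target...
        CREATE VIEW add_item AS SELECT id, name FROM items;

        -- ...and a temporary INSTEAD OF trigger fans one logical insert
        -- out to both tables, keeping the new rowid in a temp table.
        CREATE TEMPORARY TABLE temp.current_item(item_id INTEGER);
        CREATE TEMPORARY TRIGGER add_item_trigger INSTEAD OF INSERT ON add_item
        BEGIN
            INSERT INTO items(name) VALUES (new.name);
            INSERT INTO current_item SELECT last_insert_rowid();
            INSERT INTO item_status(item_id, status_id)
                SELECT item_id, 1000 FROM current_item;
            DELETE FROM current_item;
        END;
        "#,
    )?;

    conn.execute("INSERT INTO add_item(name) VALUES (?1)", rusqlite::params!["block 123"])?;
    let status: i64 =
        conn.query_row("SELECT status_id FROM item_status LIMIT 1", [], |r| r.get(0))?;
    assert_eq!(status, 1000); // initialized at 'registered', like the tasks table
    Ok(())
}
]
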
+ + let conn = &self.conn; + conn.execute_batch( + r#" + -- PRAGMA temp_store = 'MEMORY'; + CREATE TEMPORARY TABLE temp.current_task(task_id INTEGER); + + CREATE TEMPORARY TRIGGER enqueue_task_insert_trigger INSTEAD OF + INSERT + ON enqueue_task + BEGIN + INSERT INTO + tasks(chain_id, blockhash, proofsys_id, request) + VALUES + ( + new.chain_id, + new.blockhash, + new.proofsys_id, + new.request + ); + + INSERT INTO + current_task + SELECT + id + FROM + tasks + WHERE + rowid = last_insert_rowid() + LIMIT + 1; + + -- Tasks are initialized at status 1000 - registered + -- timestamp is auto-filled with datetime('now'), see its field definition + INSERT INTO + task_status(task_id, status_id) + SELECT + tmp.task_id, + 1000 + FROM + current_task tmp; + + DELETE FROM + current_task; + END; + + CREATE TEMPORARY TRIGGER update_task_progress_trigger INSTEAD OF + INSERT + ON update_task_progress + BEGIN + INSERT INTO + current_task + SELECT + id + FROM + tasks + WHERE + chain_id = new.chain_id + AND blockhash = new.blockhash + AND proofsys_id = new.proofsys_id + LIMIT + 1; + + -- timestamp is auto-filled with datetime('now'), see its field definition + INSERT INTO + task_status(task_id, status_id) + SELECT + tmp.task_id, + new.status_id + FROM + current_task tmp + LIMIT + 1; + + INSERT + OR REPLACE INTO task_proofs + SELECT + task_id, + new.proof + FROM + current_task + WHERE + new.proof IS NOT NULL + LIMIT + 1; + + DELETE FROM + current_task; + END; + "#, + )?; + + let enqueue_task = conn.prepare( + " + INSERT INTO + enqueue_task( + chain_id, + blockhash, + proofsys_id, + request + ) + VALUES + ( + :chain_id, + :blockhash, + :proofsys_id, + :request + ); + ", + )?; + + let update_task_progress = conn.prepare( + " + INSERT INTO + update_task_progress( + chain_id, + blockhash, + proofsys_id, + status_id, + proof + ) + VALUES + ( + :chain_id, + :blockhash, + :proofsys_id, + :status_id, + :proof + ); + ", + )?; + + // The requires sqlite to be compiled with dbstat support: + // https://www.sqlite.org/dbstat.html + // which is the case for rusqlite + // https://github.com/rusqlite/rusqlite/blob/v0.31.0/libsqlite3-sys/build.rs#L126 + // but may not be the case for system-wide sqlite when debugging. 
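
[A cheap way to guard against the dbstat caveat noted above, e.g. in tests run against a system sqlite (hypothetical helper, not in the patch):

use rusqlite::Connection;

/// Returns true if the linked SQLite exposes the dbstat virtual table
/// (the bundled rusqlite build does; a system libsqlite3 might not).
fn dbstat_available(conn: &Connection) -> bool {
    conn.prepare("SELECT COUNT(*) FROM dbstat").is_ok()
}
]
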
+ let get_db_size = conn.prepare( + " + SELECT + name as table_name, + SUM(pgsize) as table_size + FROM + dbstat + GROUP BY + table_name + ORDER BY + SUM(pgsize) DESC; + ", + )?; + + let get_task_proof = conn.prepare( + " + SELECT + proof + FROM + task_proofs tp + LEFT JOIN tasks t ON tp.task_id = t.id + WHERE + t.chain_id = :chain_id + AND t.blockhash = :blockhash + AND t.proofsys_id = :proofsys_id + LIMIT + 1; + ", + )?; + + let get_task_proof_by_id = conn.prepare( + " + SELECT + proof + FROM + task_proofs tp + LEFT JOIN tasks t ON tp.task_id = t.id + WHERE + t.id= :task_id + LIMIT + 1; + ", + )?; + + let get_task_proving_status = conn.prepare( + " + SELECT + ts.status_id, + timestamp + FROM + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + t.chain_id = :chain_id + AND t.blockhash = :blockhash + AND t.proofsys_id = :proofsys_id + ORDER BY + ts.timestamp; + ", + )?; + + let get_task_proving_status_by_id = conn.prepare( + " + SELECT + ts.status_id, + timestamp + FROM + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + t.id = :task_id + ORDER BY + ts.timestamp; + ", + )?; + + let get_tasks_unfinished = conn.prepare( + " + SELECT + t.chain_id, + t.blockhash, + t.proofsys_id, + ts.status_id, + timestamp + FROM + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + status_id NOT IN ( + 0, -- Success + -3000, -- Cancelled + -3100, -- Cancelled (never started) + -3200 -- Cancelled (aborted) + -- What do we do with -4000 Invalid/unsupported blocks? + -- And -9999 Unspecified failure reason? + -- For now we return them until we know more of the failure modes + ); + ", + )?; + + Ok(TaskManager { + enqueue_task, + update_task_progress, + get_task_proof, + get_task_proof_by_id, + get_task_proving_status, + get_task_proving_status_by_id, + get_tasks_unfinished, + get_db_size, + }) + } +} + +#[derive(Debug, Clone)] +pub struct EnqueueTaskParams { + pub chain_id: ChainId, + pub blockhash: B256, + pub proof_system: TaskProofsys, + pub submitter: String, + pub parent_hash: B256, + pub state_root: B256, + pub num_transactions: u64, + pub gas_used: u64, + pub payload: Vec, +} + +pub type TaskProvingStatus = Vec<(TaskStatus, DateTime)>; + +impl<'db> TaskManager<'db> { + pub fn enqueue_task( + &mut self, + chain_id: u64, + blockhash: B256, + request: &ProofRequest, + ) -> TaskManagerResult<()> { + self.enqueue_task.execute(named_params! { + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": TaskProofsys::from(request.proof_type) as u8, + ":request": serde_json::to_vec(&request)?, + })?; + + Ok(()) + } + + pub fn update_task_progress( + &mut self, + chain_id: ChainId, + blockhash: B256, + proof_type: ProofType, + status: TaskStatus, + proof: Option<&[u8]>, + ) -> TaskManagerResult<()> { + self.update_task_progress.execute(named_params! { + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": TaskProofsys::from(proof_type) as u8, + ":status_id": status as i32, + ":proof": proof + })?; + Ok(()) + } + + /// Returns the latest triplet (submitter or fulfiller, status, last update time) + pub fn get_task_proving_status( + &mut self, + chain_id: ChainId, + blockhash: B256, + proof_type: ProofType, + ) -> TaskManagerResult { + let rows = self.get_task_proving_status.query_map( + named_params! 
{ + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": TaskProofsys::from(proof_type) as u8, + }, + |row| { + Ok(( + TaskStatus::from(row.get::<_, i32>(0)?), + row.get::<_, DateTime>(1)?, + )) + }, + )?; + + Ok(rows.collect::, _>>()?) + } + + /// Returns the latest triplet (submitter or fulfiller, status, last update time) + pub fn get_task_proving_status_by_id( + &mut self, + task_id: u64, + ) -> TaskManagerResult { + let rows = self.get_task_proving_status_by_id.query_map( + named_params! { + ":task_id": task_id, + }, + |row| { + Ok(( + TaskStatus::from(row.get::<_, i32>(0)?), + row.get::<_, DateTime>(1)?, + )) + }, + )?; + let proving_status = rows.collect::, _>>()?; + + Ok(proving_status) + } + + pub fn get_task_proof( + &mut self, + chain_id: ChainId, + blockhash: B256, + proof_type: ProofType, + ) -> TaskManagerResult> { + let proof = self.get_task_proof.query_row( + named_params! { + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": TaskProofsys::from(proof_type) as u8, + }, + |r| r.get(0), + )?; + + Ok(proof) + } + + pub fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { + let proof = self.get_task_proof_by_id.query_row( + named_params! { + ":task_id": task_id, + }, + |r| r.get(0), + )?; + + Ok(proof) + } + + /// Returns the total and detailed database size + pub fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { + let rows = self + .get_db_size + .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; + let details = rows.collect::, _>>()?; + let total = details.iter().fold(0, |acc, item| acc + item.1); + Ok((total, details)) + } +} + +#[cfg(test)] +mod tests { + // We only test private functions here. + // Public API will be tested in a dedicated tests folder + + use super::*; + use tempfile::tempdir; + + #[test] + fn error_on_missing() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + assert!(TaskDb::open(&file).is_err()); + } + + #[test] + fn ensure_exclusive() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + + let _db = TaskDb::create(&file).unwrap(); + assert!(TaskDb::open(&file).is_err()); + } + + #[test] + fn ensure_unicity() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + + let _db = TaskDb::create(&file).unwrap(); + assert!(TaskDb::create(&file).is_err()); + } +} diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 86d1b2b38..eadf99a58 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -1,183 +1,25 @@ -// Raiko -// Copyright (c) 2024 Taiko Labs -// Licensed and distributed under either of -// * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). -// * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). -// at your option. This file may not be copied, modified, or distributed except according to those terms. - -//! # Raiko Task Manager -//! -//! At the moment (Apr '24) proving requires a significant amount of time -//! and maintaining a connection with a potentially external party. -//! -//! By design Raiko is stateless, it prepares inputs and forward to the various proof systems. -//! However some proving backend like Risc0's Bonsai are also stateless, -//! and only accepts proofs and return result. -//! Hence to handle crashes, networking losses and restarts, we need to persist -//! 
the status of proof requests, task submitted, proof received, proof forwarded. -//! -//! In the diagram: -//! _____________ ______________ _______________ -//! Taiko L2 -> | Taiko-geth | ======> | Raiko-host | =========> | Raiko-guests | -//! | Taiko-reth | | | | Risc0 | -//! |____________| |_____________| | SGX | -//! | SP1 | -//! |______________| -//! _____________________________ -//! =========> | Prover Networks | -//! | Risc0's Bonsai | -//! | Succinct's Prover Network | -//! |____________________________| -//! _________________________ -//! =========> | Raiko-dist | -//! | Distributed Risc0 | -//! | Distributed SP1 | -//! |_______________________| -//! -//! We would position Raiko task manager either before Raiko-host or after Raiko-host. -//! -//! ## Implementation -//! -//! The task manager is a set of tables and KV-stores. -//! - Keys for table joins are prefixed with id -//! - KV-stores for (almost) immutable data -//! - KV-store for large inputs and indistinguishable from random proofs -//! - Tables for tasks and their metadata. -//! -//! __________________________ -//! | metadata | -//! |_________________________| A simple KV-store with the DB version for migration/upgrade detection. -//! | Key | Value | Future version may add new fields, without breaking older versions. -//! |_________________|_______| -//! | task_db_version | 0 | -//! |_________________|_______| -//! -//! ________________________ -//! | Proof systems | -//! |______________________| A map: ID -> proof systems -//! | id_proofsys | Desc | -//! |_____________|________| -//! | 0 | Risc0 | (0 for Risc0 and 1 for SP1 is intentional) -//! | 1 | SP1 | -//! | 2 | SGX | -//! |_____________|________| -//! -//! _________________________________________________ -//! | Task Status code | -//! |________________________________________________| -//! | id_status | Desc | -//! |_____________|__________________________________| -//! | 0 | Success | -//! | 1000 | Registered | -//! | 2000 | Work-in-progress | -//! | | | -//! | -1000 | Proof failure (prover - generic) | -//! | -1100 | Proof failure (OOM) | -//! | | | -//! | -2000 | Network failure | -//! | | | -//! | -3000 | Cancelled | -//! | -3100 | Cancelled (never started) | -//! | -3200 | Cancelled (aborted) | -//! | -3210 | Cancellation in progress | (Yes -3210 is intentional ;)) -//! | | | -//! | -4000 | Invalid or unsupported block | -//! | | | -//! | -9999 | Unspecified failure reason | -//! |_____________|__________________________________| -//! -//! Rationale: -//! - Convention, failures use negative status code. -//! - We leave space for new status codes -//! - -X000 status code are for generic failures segregated by failures: -//! on the networking side, the prover side or trying to prove an invalid block. -//! -//! A catchall -9999 error code is provided if a failure is not due to -//! either the network, the prover or the requester invalid block. -//! They should not exist in the DB and a proper analysis -//! and eventually status code should be assigned. -//! -//! ________________________________________________________________________________________________ -//! | Tasks metadata | -//! |________________________________________________________________________________________________| -//! | id_task | chain_id | block_number | blockhash | parent_hash | state_root | # of txs | gas_used | -//! |_________|__________|______________|___________|_____________|____________|__________|__________| -//! ____________________________________ -//! | Task queue | -//! 
|___________________________________| -//! | id_task | blockhash | id_proofsys | -//! |_________|___________|_____________| -//! ______________________________________ -//! | Task payloads | -//! |_____________________________________| -//! | id_task | inputs (serialized) | -//! |_________|___________________________| -//! _____________________________________ -//! | Task requests | -//! |____________________________________| -//! | id_task | id_submitter | timestamp | -//! |_________|______________|___________| -//! ___________________________________________________________________________________ -//! | Task progress trail | -//! |__________________________________________________________________________________| -//! | id_task | third_party | id_status | timestamp | -//! |_________|________________________|_________________________|_____________________| -//! | 101 | 'Based Proposer" | 1000 (Registered) | 2024-01-01 00:00:01 | -//! | 101 | 'A Prover Network' | 2000 (WIP) | 2024-01-01 00:00:01 | -//! | 101 | 'A Prover Network' | -2000 (Network failure) | 2024-01-01 00:02:00 | -//! | 101 | 'Proof in the Pudding' | 2000 (WIP) | 2024-01-01 00:02:30 | -//!·| 101 | 'Proof in the Pudding' | 0 (Success) | 2024-01-01 01:02:30 | -//! -//! Rationale: -//! - payloads are very large and warrant a dedicated table, with pruning -//! - metadata is useful to audit block building and prover efficiency -//! - Due to failures and retries, we may submit the same task to multiple fulfillers -//! or retry with the same fulfiller so we keep an audit trail of events. -//! -//! ____________________________ -//! | Proof cache | A map: ID -> proof -//! |___________________________| -//! | id_task | proof_value | -//! |__________|________________| A Groth16 proof is 2G₁+1G₂ elements -//! | 0 | 0xabcd...6789 | On BN254: 2*(2*32)+1*(2*2*32) = 256 bytes -//! | 1 | 0x1234...cdef | -//! | ... | ... | A SGX proof is ... -//! |__________|________________| A Stark proof (not wrapped in Groth16) would be several kilobytes -//! -//! Do we need pruning? -//! There are 60s * 60min * 24h * 30j = 2592000s in a month -//! dividing by 12, that's 216000 Ethereum slots. -//! Assuming 1kB of proofs per block (Stark-to-Groth16 Risc0 & SP1 + SGX, SGX size to be verified) -//! That's only 216MB per month. 
- -// Imports -// ---------------------------------------------------------------- -use std::{ - fs::File, - io::{Error as IOError, ErrorKind as IOErrorKind}, - path::Path, -}; +use std::io::{Error as IOError, ErrorKind as IOErrorKind}; +use std::path::PathBuf; use chrono::{DateTime, Utc}; +use mem_db::InMemoryTaskManager; use num_enum::{FromPrimitive, IntoPrimitive}; -use raiko_core::interfaces::{ProofRequest, ProofType}; +use raiko_core::interfaces::ProofType; use raiko_lib::primitives::{ChainId, B256}; -use rusqlite::{ - Error as SqlError, {named_params, Statement}, {Connection, OpenFlags}, -}; +use rusqlite::Error as SqlError; use serde::Serialize; +// mod adv_sqlite; +mod mem_db; + // Types // ---------------------------------------------------------------- - -#[derive(Debug, thiserror::Error)] +#[derive(PartialEq, Debug, thiserror::Error)] pub enum TaskManagerError { #[error("IO Error {0}")] IOError(IOErrorKind), #[error("SQL Error {0}")] SqlError(String), - #[error("Serde Error {0}")] - SerdeError(#[from] serde_json::error::Error), } pub type TaskManagerResult = Result; @@ -194,51 +36,9 @@ impl From for TaskManagerError { } } -#[derive(Debug)] -pub struct TaskDb { - conn: Connection, -} - -#[derive(Debug)] -pub struct TaskManager<'db> { - enqueue_task: Statement<'db>, - update_task_progress: Statement<'db>, - get_task_proof: Statement<'db>, - get_task_proof_by_id: Statement<'db>, - get_task_proving_status: Statement<'db>, - get_task_proving_status_by_id: Statement<'db>, - #[allow(dead_code)] - get_tasks_unfinished: Statement<'db>, - get_db_size: Statement<'db>, -} - -#[derive(Debug, Copy, Clone)] -pub enum TaskProofsys { - Native = 0, - Risc0 = 1, - SP1 = 2, - SGX = 3, -} - -impl From for TaskProofsys { - fn from(value: ProofType) -> Self { - match value { - ProofType::Sp1 => Self::SP1, - ProofType::Sgx => Self::SGX, - ProofType::Risc0 => Self::Risc0, - ProofType::Native => Self::Native, - } - } -} - -impl From for ProofType { - fn from(val: TaskProofsys) -> Self { - match val { - TaskProofsys::Native => ProofType::Native, - TaskProofsys::Risc0 => ProofType::Risc0, - TaskProofsys::SP1 => ProofType::Sp1, - TaskProofsys::SGX => ProofType::Sgx, - } +impl From for TaskManagerError { + fn from(error: serde_json::Error) -> TaskManagerError { + TaskManagerError::SqlError(error.to_string()) } } @@ -250,6 +50,7 @@ pub enum TaskStatus { Success = 0, Registered = 1000, WorkInProgress = 2000, + WorkReported = 3000, ProofFailure_Generic = -1000, ProofFailure_OutOfMemory = -1100, NetworkFailure = -2000, @@ -263,626 +64,161 @@ pub enum TaskStatus { SqlDbCorruption = -99999, } -// Implementation -// ---------------------------------------------------------------- - -impl TaskDb { - fn open(path: &Path) -> TaskManagerResult { - let conn = Connection::open_with_flags(path, OpenFlags::SQLITE_OPEN_READ_WRITE)?; - conn.pragma_update(None, "foreign_keys", true)?; - conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?; - conn.pragma_update(None, "journal_mode", "WAL")?; - conn.pragma_update(None, "synchronous", "NORMAL")?; - conn.pragma_update(None, "temp_store", "MEMORY")?; - Ok(conn) - } - - fn create(path: &Path) -> TaskManagerResult { - let _file = File::options() - .write(true) - .read(true) - .create_new(true) - .open(path)?; - - let conn = Self::open(path)?; - Self::create_tables(&conn)?; - Self::create_views(&conn)?; +#[derive(Debug, Clone, Default)] +pub struct EnqueueTaskParams { + pub chain_id: ChainId, + pub blockhash: B256, + pub proof_system: ProofType, + pub prover: String, + pub 
block_number: u64, +} - Ok(conn) - } +#[derive(Debug, Clone, Serialize)] +pub struct TaskDescriptor { + pub chain_id: ChainId, + pub blockhash: B256, + pub proof_system: ProofType, + pub prover: String, +} - /// Open an existing TaskDb database at "path" - /// If a database does not exist at the path, one is created. - pub fn open_or_create(path: &Path) -> TaskManagerResult { - let conn = if path.exists() { - Self::open(path) - } else { - Self::create(path) - }?; - Ok(Self { conn }) +impl TaskDescriptor { + pub fn to_vec(self) -> Vec { + self.into() } +} - // SQL - // ---------------------------------------------------------------- - - fn create_tables(conn: &Connection) -> TaskManagerResult<()> { - // Change the task_db_version if backward compatibility is broken - // and introduce a migration on DB opening ... if conserving history is important. - conn.execute_batch( - r#" - -- Metadata and mappings - ----------------------------------------------- - CREATE TABLE metadata( - key BLOB UNIQUE NOT NULL PRIMARY KEY, - value BLOB - ); - - INSERT INTO - metadata(key, value) - VALUES - ('task_db_version', 0); - - CREATE TABLE proofsys( - id INTEGER UNIQUE NOT NULL PRIMARY KEY, - desc TEXT NOT NULL - ); - - INSERT INTO - proofsys(id, desc) - VALUES - (0, 'Native'), - (1, 'Risc0'), - (2, 'SP1'), - (3, 'SGX'); - - CREATE TABLE status_codes( - id INTEGER UNIQUE NOT NULL PRIMARY KEY, - desc TEXT NOT NULL - ); - - INSERT INTO - status_codes(id, desc) - VALUES - (0, 'Success'), - (1000, 'Registered'), - (2000, 'Work-in-progress'), - (-1000, 'Proof failure (generic)'), - (-1100, 'Proof failure (Out-Of-Memory)'), - (-2000, 'Network failure'), - (-3000, 'Cancelled'), - (-3100, 'Cancelled (never started)'), - (-3200, 'Cancelled (aborted)'), - (-3210, 'Cancellation in progress'), - (-4000, 'Invalid or unsupported block'), - (-9999, 'Unspecified failure reason'); - - -- Data - ----------------------------------------------- - -- Notes: - -- 1. a blockhash may appear as many times as there are prover backends. - -- 2. 
For query speed over (chain_id, blockhash) - -- there is no need to create an index as the UNIQUE constraint - -- has an implied index, see: - -- - https://sqlite.org/lang_createtable.html#uniqueconst - -- - https://www.sqlite.org/fileformat2.html#representation_of_sql_indices - CREATE TABLE tasks( - id INTEGER UNIQUE NOT NULL PRIMARY KEY, - chain_id INTEGER NOT NULL, - blockhash BLOB NOT NULL, - proofsys_id INTEGER NOT NULL, - request BLOB, - FOREIGN KEY(proofsys_id) REFERENCES proofsys(id), - UNIQUE (chain_id, blockhash, proofsys_id) - ); - - -- Proofs might also be large, so we isolate them in a dedicated table - CREATE TABLE task_proofs( - task_id INTEGER UNIQUE NOT NULL PRIMARY KEY, - proof BLOB NOT NULL, - FOREIGN KEY(task_id) REFERENCES tasks(id) - ); - - CREATE TABLE task_status( - task_id INTEGER NOT NULL, - status_id INTEGER NOT NULL, - timestamp TIMESTAMP DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) NOT NULL, - FOREIGN KEY(task_id) REFERENCES tasks(id), - FOREIGN KEY(status_id) REFERENCES status_codes(id), - UNIQUE (task_id, timestamp) - ); - "#, - )?; - - Ok(()) +impl From for Vec { + fn from(val: TaskDescriptor) -> Self { + let mut v = Vec::new(); + v.extend_from_slice(&val.chain_id.to_be_bytes()); + v.extend_from_slice(val.blockhash.as_ref()); + v.extend_from_slice(&(val.proof_system as u8).to_be_bytes()); + v.extend_from_slice(val.prover.as_bytes()); + v } +} - fn create_views(conn: &Connection) -> TaskManagerResult<()> { - // By convention, views will use an action verb as name. - conn.execute_batch( - r#" - CREATE VIEW enqueue_task AS - SELECT - t.id, - t.chain_id, - t.blockhash, - t.proofsys_id, - t.request - FROM - tasks t - LEFT JOIN task_status ts on ts.task_id = t.id; - - CREATE VIEW update_task_progress AS - SELECT - t.id, - t.chain_id, - t.blockhash, - t.proofsys_id, - ts.status_id, - tpf.proof - FROM - tasks t - LEFT JOIN task_status ts on ts.task_id = t.id - LEFT JOIN task_proofs tpf on tpf.task_id = t.id; - "#, - )?; - - Ok(()) +// Taskkey from EnqueueTaskParams +impl From<&EnqueueTaskParams> for TaskDescriptor { + fn from(params: &EnqueueTaskParams) -> TaskDescriptor { + TaskDescriptor { + chain_id: params.chain_id, + blockhash: params.blockhash, + proof_system: params.proof_system, + prover: params.prover.clone(), + } } +} - /// Set a tracer to debug SQL execution - /// for example: - /// db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - #[cfg(test)] - pub fn set_tracer(&mut self, trace_fn: Option) { - self.conn.trace(trace_fn); - } +#[derive(Debug, Clone)] +pub struct TaskProvingStatus(pub TaskStatus, pub Option, pub DateTime); - pub fn manage(&self) -> TaskManagerResult> { - // To update all the tables with the task_id assigned by Sqlite - // we require row IDs for the tasks table - // and we use last_insert_rowid() which is not reentrant and need a transaction lock - // and store them in a temporary table, configured to be in-memory. - // - // Alternative approaches considered: - // 1. Sqlite does not support variables (because it's embedded and significantly less overhead than other SQL "Client-Server" DBs). - // 2. using AUTOINCREMENT and/or the sqlite_sequence table - // - sqlite recommends not using AUTOINCREMENT for performance - // https://www.sqlite.org/autoinc.html - // 3. INSERT INTO ... 
RETURNING nested in a WITH clause (CTE / Common Table Expression) - // - Sqlite can only do RETURNING to the application, it cannot be nested in another query or diverted to another table - // https://sqlite.org/lang_returning.html#limitations_and_caveats - // 4. CREATE TEMPORARY TABLE AS with an INSERT INTO ... RETURNING nested - // - Same limitation AND CREATE TABLEAS seems to only support SELECT statements (but if we could nest RETURNING we can workaround that - // https://www.sqlite.org/lang_createtable.html#create_table_as_select_statements - // - // Hence we have to use row IDs and last_insert_rowid() - // - // Furthermore we use a view and an INSTEAD OF trigger to update the tables, - // the alternative being - // - // 5. Direct insert into tables - // This does not work as SQLite `execute` and `prepare` - // only process the first statement. - // - // And lastly, we need the view and trigger to be temporary because - // otherwise they can't access the temporary table: - // 6. https://sqlite.org/forum/info/4f998eeec510bceee69404541e5c9ca0a301868d59ec7c3486ecb8084309bba1 - // "Triggers in any schema other than temp may only access objects in their own schema. However, triggers in temp may access any object by name, even cross-schema." - - let conn = &self.conn; - conn.execute_batch( - r#" - -- PRAGMA temp_store = 'MEMORY'; - CREATE TEMPORARY TABLE temp.current_task(task_id INTEGER); - - CREATE TEMPORARY TRIGGER enqueue_task_insert_trigger INSTEAD OF - INSERT - ON enqueue_task - BEGIN - INSERT INTO - tasks(chain_id, blockhash, proofsys_id, request) - VALUES - ( - new.chain_id, - new.blockhash, - new.proofsys_id, - new.request - ); - - INSERT INTO - current_task - SELECT - id - FROM - tasks - WHERE - rowid = last_insert_rowid() - LIMIT - 1; - - -- Tasks are initialized at status 1000 - registered - -- timestamp is auto-filled with datetime('now'), see its field definition - INSERT INTO - task_status(task_id, status_id) - SELECT - tmp.task_id, - 1000 - FROM - current_task tmp; - - DELETE FROM - current_task; - END; - - CREATE TEMPORARY TRIGGER update_task_progress_trigger INSTEAD OF - INSERT - ON update_task_progress - BEGIN - INSERT INTO - current_task - SELECT - id - FROM - tasks - WHERE - chain_id = new.chain_id - AND blockhash = new.blockhash - AND proofsys_id = new.proofsys_id - LIMIT - 1; - - -- timestamp is auto-filled with datetime('now'), see its field definition - INSERT INTO - task_status(task_id, status_id) - SELECT - tmp.task_id, - new.status_id - FROM - current_task tmp - LIMIT - 1; - - INSERT - OR REPLACE INTO task_proofs - SELECT - task_id, - new.proof - FROM - current_task - WHERE - new.proof IS NOT NULL - LIMIT - 1; - - DELETE FROM - current_task; - END; - "#, - )?; - - let enqueue_task = conn.prepare( - " - INSERT INTO - enqueue_task( - chain_id, - blockhash, - proofsys_id, - request - ) - VALUES - ( - :chain_id, - :blockhash, - :proofsys_id, - :request - ); - ", - )?; - - let update_task_progress = conn.prepare( - " - INSERT INTO - update_task_progress( - chain_id, - blockhash, - proofsys_id, - status_id, - proof - ) - VALUES - ( - :chain_id, - :blockhash, - :proofsys_id, - :status_id, - :proof - ); - ", - )?; - - // The requires sqlite to be compiled with dbstat support: - // https://www.sqlite.org/dbstat.html - // which is the case for rusqlite - // https://github.com/rusqlite/rusqlite/blob/v0.31.0/libsqlite3-sys/build.rs#L126 - // but may not be the case for system-wide sqlite when debugging. 
- let get_db_size = conn.prepare( - " - SELECT - name as table_name, - SUM(pgsize) as table_size - FROM - dbstat - GROUP BY - table_name - ORDER BY - SUM(pgsize) DESC; - ", - )?; - - let get_task_proof = conn.prepare( - " - SELECT - proof - FROM - task_proofs tp - LEFT JOIN tasks t ON tp.task_id = t.id - WHERE - t.chain_id = :chain_id - AND t.blockhash = :blockhash - AND t.proofsys_id = :proofsys_id - LIMIT - 1; - ", - )?; - - let get_task_proof_by_id = conn.prepare( - " - SELECT - proof - FROM - task_proofs tp - LEFT JOIN tasks t ON tp.task_id = t.id - WHERE - t.id= :task_id - LIMIT - 1; - ", - )?; - - let get_task_proving_status = conn.prepare( - " - SELECT - ts.status_id, - timestamp - FROM - task_status ts - LEFT JOIN tasks t ON ts.task_id = t.id - WHERE - t.chain_id = :chain_id - AND t.blockhash = :blockhash - AND t.proofsys_id = :proofsys_id - ORDER BY - ts.timestamp DESC; - ", - )?; - - let get_task_proving_status_by_id = conn.prepare( - " - SELECT - ts.status_id, - timestamp - FROM - task_status ts - LEFT JOIN tasks t ON ts.task_id = t.id - WHERE - t.id = :task_id - ORDER BY - ts.timestamp DESC; - ", - )?; - - let get_tasks_unfinished = conn.prepare( - " - SELECT - t.chain_id, - t.blockhash, - t.proofsys_id, - ts.status_id, - timestamp - FROM - task_status ts - LEFT JOIN tasks t ON ts.task_id = t.id - WHERE - status_id NOT IN ( - 0, -- Success - -3000, -- Cancelled - -3100, -- Cancelled (never started) - -3200 -- Cancelled (aborted) - -- What do we do with -4000 Invalid/unsupported blocks? - -- And -9999 Unspecified failure reason? - -- For now we return them until we know more of the failure modes - ); - ", - )?; - - Ok(TaskManager { - enqueue_task, - update_task_progress, - get_task_proof, - get_task_proof_by_id, - get_task_proving_status, - get_task_proving_status_by_id, - get_tasks_unfinished, - get_db_size, - }) - } -} +pub type TaskProvingStatusRecords = Vec; #[derive(Debug, Clone)] -pub struct EnqueueTaskParams { - pub chain_id: ChainId, - pub blockhash: B256, - pub proof_system: TaskProofsys, - pub submitter: String, - pub parent_hash: B256, - pub state_root: B256, - pub num_transactions: u64, - pub gas_used: u64, - pub payload: Vec, +pub struct TaskManagerOpts { + pub sqlite_file: PathBuf, + pub max_db_size: usize, } -pub type TaskProvingStatus = Vec<(TaskStatus, DateTime)>; +pub trait TaskManager { + /// new a task manager + fn new(opts: &TaskManagerOpts) -> Self; -impl<'db> TaskManager<'db> { - pub fn enqueue_task( + /// enqueue_task + fn enqueue_task( &mut self, - chain_id: u64, - blockhash: B256, - request: &ProofRequest, - ) -> TaskManagerResult<()> { - self.enqueue_task.execute(named_params! { - ":chain_id": chain_id, - ":blockhash": blockhash.to_vec(), - ":proofsys_id": TaskProofsys::from(request.proof_type) as u8, - ":request": serde_json::to_vec(&request)?, - })?; - - Ok(()) - } + request: &EnqueueTaskParams, + ) -> TaskManagerResult; - pub fn update_task_progress( + /// Update the task progress + fn update_task_progress( &mut self, chain_id: ChainId, blockhash: B256, - proof_type: ProofType, + proof_system: ProofType, + prover: Option, status: TaskStatus, proof: Option<&[u8]>, - ) -> TaskManagerResult<()> { - self.update_task_progress.execute(named_params! 
{ - ":chain_id": chain_id, - ":blockhash": blockhash.to_vec(), - ":proofsys_id": TaskProofsys::from(proof_type) as u8, - ":status_id": status as i32, - ":proof": proof - })?; - Ok(()) - } + ) -> TaskManagerResult<()>; /// Returns the latest triplet (submitter or fulfiller, status, last update time) - pub fn get_task_proving_status( + fn get_task_proving_status( &mut self, chain_id: ChainId, blockhash: B256, - proof_type: ProofType, - ) -> TaskManagerResult { - let rows = self.get_task_proving_status.query_map( - named_params! { - ":chain_id": chain_id, - ":blockhash": blockhash.to_vec(), - ":proofsys_id": TaskProofsys::from(proof_type) as u8, - }, - |row| { - Ok(( - TaskStatus::from(row.get::<_, i32>(0)?), - row.get::<_, DateTime>(1)?, - )) - }, - )?; - - Ok(rows.collect::, _>>()?) - } + proof_system: ProofType, + prover: Option, + ) -> TaskManagerResult; /// Returns the latest triplet (submitter or fulfiller, status, last update time) - pub fn get_task_proving_status_by_id( + fn get_task_proving_status_by_id( &mut self, task_id: u64, - ) -> TaskManagerResult { - let rows = self.get_task_proving_status_by_id.query_map( - named_params! { - ":task_id": task_id, - }, - |row| { - Ok(( - TaskStatus::from(row.get::<_, i32>(0)?), - row.get::<_, DateTime>(1)?, - )) - }, - )?; - let proving_status = rows.collect::, _>>()?; - - Ok(proving_status) - } + ) -> TaskManagerResult; - pub fn get_task_proof( + /// Returns the proof for the given task + fn get_task_proof( &mut self, chain_id: ChainId, blockhash: B256, - proof_type: ProofType, - ) -> TaskManagerResult> { - let proof = self.get_task_proof.query_row( - named_params! { - ":chain_id": chain_id, - ":blockhash": blockhash.to_vec(), - ":proofsys_id": TaskProofsys::from(proof_type) as u8, - }, - |r| r.get(0), - )?; - - Ok(proof) - } + proof_system: ProofType, + prover: Option, + ) -> TaskManagerResult>; - pub fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { - let proof = self.get_task_proof_by_id.query_row( - named_params! { - ":task_id": task_id, - }, - |r| r.get(0), - )?; - - Ok(proof) - } + fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult>; /// Returns the total and detailed database size - pub fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { - let rows = self - .get_db_size - .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; - let details = rows.collect::, _>>()?; - let total = details.iter().fold(0, |acc, item| acc + item.1); - Ok((total, details)) - } + fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)>; + + /// Prune old tasks + fn prune_db(&mut self) -> TaskManagerResult<()>; } -#[cfg(test)] -mod tests { - // We only test private functions here. 
-    // Public API will be tested in a dedicated tests folder
+use std::sync::{Arc, Mutex, Once};
 
-    use super::*;
-    use tempfile::tempdir;
+// todo: use feature to switch between sqlite and memory db
+pub fn get_task_manager(opts: &TaskManagerOpts) -> Arc<Mutex<InMemoryTaskManager>> {
+    static INIT: Once = Once::new();
+    static mut SHARED_TASK_MANAGER: Option<Arc<Mutex<InMemoryTaskManager>>> = None;
 
-    #[test]
-    fn error_on_missing() {
-        let dir = tempdir().unwrap();
-        let file = dir.path().join("db.sqlite");
-        assert!(TaskDb::open(&file).is_err());
-    }
+    INIT.call_once(|| {
+        let task_manager: Arc<Mutex<InMemoryTaskManager>> =
+            Arc::new(Mutex::new(InMemoryTaskManager::new(opts)));
+        unsafe {
+            SHARED_TASK_MANAGER = Some(Arc::clone(&task_manager));
+        }
+    });
 
-    #[test]
-    fn ensure_exclusive() {
-        let dir = tempdir().unwrap();
-        let file = dir.path().join("db.sqlite");
+    unsafe { SHARED_TASK_MANAGER.as_ref().unwrap().clone() }
+}
 
-        let _db = TaskDb::create(&file).unwrap();
-        assert!(TaskDb::open(&file).is_err());
-    }
+#[cfg(test)]
+mod test {
+    use super::*;
 
     #[test]
-    fn ensure_unicity() {
-        let dir = tempdir().unwrap();
-        let file = dir.path().join("db.sqlite");
-
-        let _db = TaskDb::create(&file).unwrap();
-        assert!(TaskDb::create(&file).is_err());
+    fn test_new_taskmanager() {
+        let opts = TaskManagerOpts {
+            sqlite_file: "test.db".to_string().into(),
+            max_db_size: 1024,
+        };
+        let binding = get_task_manager(&opts);
+        let mut task_manager = binding.lock().unwrap();
+        assert_eq!(task_manager.get_db_size().unwrap().0, 0);
+
+        assert_eq!(
+            task_manager
+                .enqueue_task(&EnqueueTaskParams {
+                    chain_id: 1,
+                    blockhash: B256::default(),
+                    proof_system: ProofType::Native,
+                    prover: "test".to_string(),
+                    block_number: 1
+                })
+                .unwrap()
+                .len(),
+            1
+        );
     }
 }
diff --git a/task_manager/src/mem_db.rs b/task_manager/src/mem_db.rs
new file mode 100644
index 000000000..019ba2243
--- /dev/null
+++ b/task_manager/src/mem_db.rs
@@ -0,0 +1,311 @@
+// Raiko
+// Copyright (c) 2024 Taiko Labs
+// Licensed and distributed under either of
+//   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
+//   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
+// at your option. This file may not be copied, modified, or distributed except according to those terms.
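The `get_task_manager` singleton added to lib.rs above guards a `static mut` with `Once`, which works but needs `unsafe`, and references to `static mut` become a hard error in the 2024 edition. A safer sketch with the same shape, assuming Rust >= 1.70 and the `TaskManager` trait in scope for the `new` constructor; this is an alternative, not what the patch ships:

    use std::sync::{Arc, Mutex, OnceLock};

    pub fn get_task_manager(opts: &TaskManagerOpts) -> Arc<Mutex<InMemoryTaskManager>> {
        // OnceLock gives the same init-exactly-once guarantee as Once,
        // with no unsafe access to a mutable static.
        static SHARED: OnceLock<Arc<Mutex<InMemoryTaskManager>>> = OnceLock::new();
        SHARED
            .get_or_init(|| Arc::new(Mutex::new(InMemoryTaskManager::new(opts))))
            .clone()
    }

Either way, only the first caller's `opts` take effect; every later call returns the cached instance.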
+
+// Imports
+// ----------------------------------------------------------------
+use std::collections::HashMap;
+
+use crate::{
+    EnqueueTaskParams, TaskDescriptor, TaskManager, TaskManagerError, TaskManagerOpts,
+    TaskManagerResult, TaskProvingStatus, TaskProvingStatusRecords, TaskStatus,
+};
+
+use chrono::Utc;
+use raiko_core::interfaces::ProofType;
+use raiko_lib::primitives::{keccak::keccak, ChainId, B256};
+use tracing::{debug, info};
+
+#[derive(Debug)]
+pub struct InMemoryTaskManager {
+    db: InMemoryTaskDb,
+}
+
+#[derive(Debug)]
+pub struct InMemoryTaskDb {
+    enqueue_task: HashMap<B256, TaskProvingStatusRecords>,
+    task_id_desc: HashMap<u64, B256>,
+    task_id: u64,
+}
+
+impl InMemoryTaskDb {
+    fn new() -> InMemoryTaskDb {
+        InMemoryTaskDb {
+            enqueue_task: HashMap::new(),
+            task_id_desc: HashMap::new(),
+            task_id: 0,
+        }
+    }
+
+    fn enqueue_task(&mut self, params: &EnqueueTaskParams) {
+        let task_desc_data: Vec<u8> = TaskDescriptor::from(params).into();
+        let key: B256 = keccak(task_desc_data).into();
+        let task_status = TaskProvingStatus(
+            TaskStatus::Registered,
+            Some(params.prover.clone()),
+            Utc::now(),
+        );
+
+        match self.enqueue_task.get(&key) {
+            Some(task_proving_records) => {
+                debug!(
+                    "Task already exists: {:?}",
+                    task_proving_records.last().unwrap().0
+                );
+            } // do nothing
+            None => {
+                info!("Enqueue new task: {:?}", params);
+                self.enqueue_task.insert(key, vec![task_status]);
+                self.task_id_desc.insert(self.task_id, key);
+                self.task_id += 1;
+            }
+        }
+    }
+
+    fn update_task_progress(
+        &mut self,
+        chain_id: ChainId,
+        blockhash: B256,
+        proof_system: ProofType,
+        prover: Option<String>,
+        status: TaskStatus,
+        proof: Option<&[u8]>,
+    ) {
+        let td_data: Vec<u8> = TaskDescriptor {
+            chain_id,
+            blockhash,
+            proof_system,
+            prover: prover.clone().unwrap_or_default().to_owned(),
+        }
+        .into();
+        let key = keccak(td_data).into();
+        assert!(self.enqueue_task.contains_key(&key));
+
+        let task_proving_records = self.enqueue_task.get(&key).unwrap();
+        let task_status = task_proving_records.last().unwrap().0;
+        if status != task_status {
+            let new_records = task_proving_records
+                .iter()
+                .cloned()
+                .chain(std::iter::once(TaskProvingStatus(
+                    status,
+                    proof.map(hex::encode),
+                    Utc::now(),
+                )))
+                .collect();
+            self.enqueue_task.insert(key, new_records);
+        }
+    }
+
+    fn get_task_proving_status(
+        &mut self,
+        chain_id: ChainId,
+        blockhash: B256,
+        proof_system: ProofType,
+        prover: Option<String>,
+    ) -> Result<TaskProvingStatusRecords, TaskManagerError> {
+        let key: B256 = keccak(
+            TaskDescriptor {
+                chain_id,
+                blockhash,
+                proof_system,
+                prover: prover.unwrap_or_default().to_owned(),
+            }
+            .to_vec(),
+        )
+        .into();
+
+        match self.enqueue_task.get(&key) {
+            Some(proving_status_records) => Ok(proving_status_records.clone()),
+            None => Err(TaskManagerError::SqlError("Key not found".to_owned())),
+        }
+    }
+
+    fn get_task_proving_status_by_id(
+        &mut self,
+        task_id: u64,
+    ) -> Result<TaskProvingStatusRecords, TaskManagerError> {
+        assert!(self.task_id_desc.contains_key(&task_id));
+        let key = self.task_id_desc.get(&task_id).unwrap();
+        let task_status = self.enqueue_task.get(key).unwrap();
+        Ok(task_status.clone())
+    }
+
+    fn get_task_proof(
+        &mut self,
+        chain_id: ChainId,
+        blockhash: B256,
+        proof_system: ProofType,
+        prover: Option<String>,
+    ) -> Result<Vec<u8>, TaskManagerError> {
+        let key: B256 = keccak(
+            TaskDescriptor {
+                chain_id,
+                blockhash,
+                proof_system,
+                prover: prover.unwrap_or_default().to_owned(),
+            }
+            .to_vec(),
+        )
+        .into();
+        assert!(self.enqueue_task.contains_key(&key));
+
+        let proving_status_records = self.enqueue_task.get(&key).unwrap();
+        let task_status = proving_status_records.last().unwrap();
+        if task_status.0 == TaskStatus::Success {
+            let proof = task_status.1.clone().unwrap();
+            Ok(hex::decode(proof).unwrap())
+        } else {
+            Err(TaskManagerError::SqlError("working in process".to_owned()))
+        }
+    }
+
+    fn get_task_proof_by_id(&mut self, task_id: u64) -> Result<Vec<u8>, TaskManagerError> {
+        assert!(self.task_id_desc.contains_key(&task_id));
+        let key = self.task_id_desc.get(&task_id).unwrap();
+        let task_records = self.enqueue_task.get(key).unwrap();
+        let task_status = task_records.last().unwrap();
+        if task_status.0 == TaskStatus::Success {
+            let proof = task_status.1.clone().unwrap();
+            Ok(hex::decode(proof).unwrap())
+        } else {
+            Err(TaskManagerError::SqlError("working in process".to_owned()))
+        }
+    }
+
+    fn size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> {
+        Ok((self.enqueue_task.len() + self.task_id_desc.len(), vec![]))
+    }
+
+    #[allow(dead_code)]
+    fn prune(&mut self) {
+        todo!()
+    }
+}
+
+impl TaskManager for InMemoryTaskManager {
+    fn new(_opts: &TaskManagerOpts) -> Self {
+        InMemoryTaskManager {
+            db: InMemoryTaskDb::new(),
+        }
+    }
+
+    fn enqueue_task(
+        &mut self,
+        params: &EnqueueTaskParams,
+    ) -> TaskManagerResult<TaskProvingStatusRecords> {
+        if let Ok(proving_status) = self.db.get_task_proving_status(
+            params.chain_id,
+            params.blockhash,
+            params.proof_system,
+            Some(params.prover.to_string()),
+        ) {
+            Ok(proving_status)
+        } else {
+            self.db.enqueue_task(params);
+            let proving_status = self.db.get_task_proving_status(
+                params.chain_id,
+                params.blockhash,
+                params.proof_system,
+                Some(params.prover.clone()),
+            )?;
+            Ok(proving_status)
+        }
+    }
+
+    fn update_task_progress(
+        &mut self,
+        chain_id: ChainId,
+        blockhash: B256,
+        proof_system: ProofType,
+        prover: Option<String>,
+        status: TaskStatus,
+        proof: Option<&[u8]>,
+    ) -> TaskManagerResult<()> {
+        self.db
+            .update_task_progress(chain_id, blockhash, proof_system, prover, status, proof);
+        Ok(())
+    }
+
+    /// Returns the latest triplet (submitter or fulfiller, status, last update time)
+    fn get_task_proving_status(
+        &mut self,
+        chain_id: ChainId,
+        blockhash: B256,
+        proof_system: ProofType,
+        prover: Option<String>,
+    ) -> TaskManagerResult<TaskProvingStatusRecords> {
+        self.db
+            .get_task_proving_status(chain_id, blockhash, proof_system, prover)
+    }
+
+    /// Returns the latest triplet (submitter or fulfiller, status, last update time)
+    fn get_task_proving_status_by_id(
+        &mut self,
+        task_id: u64,
+    ) -> TaskManagerResult<TaskProvingStatusRecords> {
+        let proving_status = self.db.get_task_proving_status_by_id(task_id)?;
+        Ok(proving_status)
+    }
+
+    fn get_task_proof(
+        &mut self,
+        chain_id: ChainId,
+        blockhash: B256,
+        proof_system: ProofType,
+        prover: Option<String>,
+    ) -> TaskManagerResult<Vec<u8>> {
+        let proof = self
+            .db
+            .get_task_proof(chain_id, blockhash, proof_system, prover)?;
+        Ok(proof)
+    }
+
+    fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult<Vec<u8>> {
+        let proof = self.db.get_task_proof_by_id(task_id)?;
+        Ok(proof)
+    }
+
+    /// Returns the total and detailed database size
+    fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> {
+        self.db.size()
+    }
+
+    fn prune_db(&mut self) -> TaskManagerResult<()> {
+        todo!()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::ProofType;
+
+    #[test]
+    fn test_db_open() {
+        assert!(InMemoryTaskDb::new().size().is_ok());
+    }
+
+    #[test]
+    fn test_db_enqueue() {
+        let mut db = InMemoryTaskDb::new();
+        let params = EnqueueTaskParams {
+            chain_id: 1,
+            blockhash: B256::default(),
+            proof_system: ProofType::Native,
+            prover: "0x1234".to_owned(),
+            ..Default::default()
+        };
+        db.enqueue_task(&params);
+        let
status = db.get_task_proving_status( + params.chain_id, + params.blockhash, + params.proof_system, + Some(params.prover.clone()), + ); + assert!(status.is_ok()); + } +} diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index e4a8144e7..11d6d3f93 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -15,7 +15,9 @@ mod tests { use rand_chacha::ChaCha8Rng; use raiko_lib::primitives::B256; - use raiko_task_manager::{TaskDb, TaskStatus}; + use raiko_task_manager::{ + get_task_manager, EnqueueTaskParams, TaskManager, TaskManagerOpts, TaskStatus, + }; fn create_random_task(rng: &mut ChaCha8Rng) -> (u64, B256, ProofRequest) { let chain_id = 100; @@ -60,14 +62,23 @@ mod tests { let dir = tempdir().unwrap(); let file = dir.path().join("test_enqueue_task.sqlite"); + let binding = get_task_manager(&TaskManagerOpts { + sqlite_file: file, + max_db_size: 1_000_000, + }); #[allow(unused_mut)] - let mut db = TaskDb::open_or_create(&file).unwrap(); - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut tama = db.manage().unwrap(); + let mut tama = binding.lock().unwrap(); let (chain_id, block_hash, request) = create_random_task(&mut ChaCha8Rng::seed_from_u64(123)); - tama.enqueue_task(chain_id, block_hash, &request).unwrap(); + tama.enqueue_task(&EnqueueTaskParams { + chain_id, + blockhash: block_hash, + proof_system: request.proof_type, + prover: request.prover.to_string(), + block_number: request.block_number, + }) + .unwrap(); } #[test] @@ -86,10 +97,12 @@ mod tests { // let dir = tempdir().unwrap(); // let file = dir.path().join("test_update_task_progress.sqlite"); + let binding = get_task_manager(&TaskManagerOpts { + sqlite_file: file, + max_db_size: 1_000_000, + }); #[allow(unused_mut)] - let mut db = TaskDb::open_or_create(&file).unwrap(); - // db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); - let mut tama = db.manage().unwrap(); + let mut tama = binding.lock().unwrap(); let mut rng = ChaCha8Rng::seed_from_u64(123); let mut tasks = vec![]; @@ -97,22 +110,35 @@ mod tests { for _ in 0..5 { let (chain_id, block_hash, request) = create_random_task(&mut rng); - tama.enqueue_task(chain_id, block_hash, &request).unwrap(); + tama.enqueue_task(&EnqueueTaskParams { + chain_id, + blockhash: block_hash, + proof_system: request.proof_type, + prover: request.prover.to_string(), + block_number: request.block_number, + }) + .unwrap(); let task_status = tama - .get_task_proving_status(chain_id, block_hash, request.proof_type) + .get_task_proving_status( + chain_id, + block_hash, + request.proof_type, + Some(request.prover.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 1); - let (status, _) = task_status + let status = task_status .first() .expect("Already confirmed there is exactly 1 element"); - assert_eq!(status, &TaskStatus::Registered); + assert_eq!(status.0, TaskStatus::Registered); tasks.push(( chain_id, block_hash, request.block_number, request.proof_type, + request.prover, )); } @@ -120,25 +146,36 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].3) + .get_task_proving_status( + tasks[0].0, + tasks[0].1, + tasks[0].3, + Some(tasks[0].4.to_string()), + ) .unwrap(); println!("{task_status:?}"); tama.update_task_progress( tasks[0].0, tasks[0].1, tasks[0].3, + Some(tasks[0].4.to_string()), TaskStatus::Cancelled_NeverStarted, None, ) .unwrap(); let task_status = tama - .get_task_proving_status(tasks[0].0, tasks[0].1, tasks[0].3) + 
.get_task_proving_status( + tasks[0].0, + tasks[0].1, + tasks[0].3, + Some(tasks[0].4.to_string()), + ) .unwrap(); println!("{task_status:?}"); assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, TaskStatus::Cancelled_NeverStarted); - assert_eq!(task_status[1].0, TaskStatus::Registered); + assert_eq!(task_status[1].0, TaskStatus::Cancelled_NeverStarted); + assert_eq!(task_status[0].0, TaskStatus::Registered); } // ----------------------- { @@ -146,6 +183,7 @@ mod tests { tasks[1].0, tasks[1].1, tasks[1].3, + Some(tasks[1].4.to_string()), TaskStatus::WorkInProgress, None, ) @@ -153,11 +191,16 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].3) + .get_task_proving_status( + tasks[1].0, + tasks[1].1, + tasks[1].3, + Some(tasks[1].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, TaskStatus::Registered); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[0].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -166,6 +209,7 @@ mod tests { tasks[1].0, tasks[1].1, tasks[1].3, + Some(tasks[1].4.to_string()), TaskStatus::CancellationInProgress, None, ) @@ -173,12 +217,17 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].3) + .get_task_proving_status( + tasks[1].0, + tasks[1].1, + tasks[1].3, + Some(tasks[1].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 3); - assert_eq!(task_status[0].0, TaskStatus::CancellationInProgress); + assert_eq!(task_status[2].0, TaskStatus::CancellationInProgress); assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[2].0, TaskStatus::Registered); + assert_eq!(task_status[0].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -187,6 +236,7 @@ mod tests { tasks[1].0, tasks[1].1, tasks[1].3, + Some(tasks[1].4.to_string()), TaskStatus::Cancelled, None, ) @@ -194,13 +244,18 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[1].0, tasks[1].1, tasks[1].3) + .get_task_proving_status( + tasks[1].0, + tasks[1].1, + tasks[1].3, + Some(tasks[1].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 4); - assert_eq!(task_status[0].0, TaskStatus::Cancelled); - assert_eq!(task_status[1].0, TaskStatus::CancellationInProgress); - assert_eq!(task_status[2].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[3].0, TaskStatus::Registered); + assert_eq!(task_status[3].0, TaskStatus::Cancelled); + assert_eq!(task_status[2].0, TaskStatus::CancellationInProgress); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[0].0, TaskStatus::Registered); } } @@ -210,6 +265,7 @@ mod tests { tasks[2].0, tasks[2].1, tasks[2].3, + Some(tasks[2].4.to_string()), TaskStatus::WorkInProgress, None, ) @@ -217,11 +273,16 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].3) + .get_task_proving_status( + tasks[2].0, + tasks[2].1, + tasks[2].3, + Some(tasks[2].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, TaskStatus::Registered); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[0].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -231,6 +292,7 @@ mod tests { tasks[2].0, tasks[2].1, 
tasks[2].3, + Some(tasks[2].4.to_string()), TaskStatus::Success, Some(&proof), ) @@ -238,18 +300,28 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[2].0, tasks[2].1, tasks[2].3) + .get_task_proving_status( + tasks[2].0, + tasks[2].1, + tasks[2].3, + Some(tasks[2].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 3); - assert_eq!(task_status[0].0, TaskStatus::Success); + assert_eq!(task_status[2].0, TaskStatus::Success); assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[2].0, TaskStatus::Registered); + assert_eq!(task_status[0].0, TaskStatus::Registered); } assert_eq!( proof, - tama.get_task_proof(tasks[2].0, tasks[2].1, tasks[2].3) - .unwrap() + tama.get_task_proof( + tasks[2].0, + tasks[2].1, + tasks[2].3, + Some(tasks[2].4.to_string()) + ) + .unwrap() ); } @@ -259,6 +331,7 @@ mod tests { tasks[3].0, tasks[3].1, tasks[3].3, + Some(tasks[3].4.to_string()), TaskStatus::WorkInProgress, None, ) @@ -266,11 +339,16 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) + .get_task_proving_status( + tasks[3].0, + tasks[3].1, + tasks[3].3, + Some(tasks[3].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 2); - assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, TaskStatus::Registered); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[0].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -279,6 +357,7 @@ mod tests { tasks[3].0, tasks[3].1, tasks[3].3, + Some(tasks[3].4.to_string()), TaskStatus::NetworkFailure, None, ) @@ -286,12 +365,17 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) + .get_task_proving_status( + tasks[3].0, + tasks[3].1, + tasks[3].3, + Some(tasks[3].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 3); - assert_eq!(task_status[0].0, TaskStatus::NetworkFailure); + assert_eq!(task_status[2].0, TaskStatus::NetworkFailure); assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[2].0, TaskStatus::Registered); + assert_eq!(task_status[0].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -300,6 +384,7 @@ mod tests { tasks[3].0, tasks[3].1, tasks[3].3, + Some(tasks[3].4.to_string()), TaskStatus::WorkInProgress, None, ) @@ -307,13 +392,18 @@ mod tests { { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) + .get_task_proving_status( + tasks[3].0, + tasks[3].1, + tasks[3].3, + Some(tasks[3].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 4); - assert_eq!(task_status[0].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[1].0, TaskStatus::NetworkFailure); - assert_eq!(task_status[2].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[3].0, TaskStatus::Registered); + assert_eq!(task_status[3].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[2].0, TaskStatus::NetworkFailure); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[0].0, TaskStatus::Registered); } std::thread::sleep(Duration::from_millis(1)); @@ -323,27 +413,38 @@ mod tests { tasks[3].0, tasks[3].1, tasks[3].3, + Some(tasks[3].4.to_string()), TaskStatus::Success, - Some(&proof), + Some(proof.as_slice()), ) .unwrap(); { let task_status = tama - .get_task_proving_status(tasks[3].0, tasks[3].1, tasks[3].3) + .get_task_proving_status( + tasks[3].0, + tasks[3].1, + tasks[3].3, + 
Some(tasks[3].4.to_string()), + ) .unwrap(); assert_eq!(task_status.len(), 5); - assert_eq!(task_status[0].0, TaskStatus::Success); - assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[2].0, TaskStatus::NetworkFailure); + assert_eq!(task_status[4].0, TaskStatus::Success); assert_eq!(task_status[3].0, TaskStatus::WorkInProgress); - assert_eq!(task_status[4].0, TaskStatus::Registered); + assert_eq!(task_status[2].0, TaskStatus::NetworkFailure); + assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); + assert_eq!(task_status[0].0, TaskStatus::Registered); } assert_eq!( proof, - tama.get_task_proof(tasks[3].0, tasks[3].1, tasks[3].3) - .unwrap() + tama.get_task_proof( + tasks[3].0, + tasks[3].1, + tasks[3].3, + Some(tasks[3].4.to_string()) + ) + .unwrap() ); } } From 7e48839c975a8cabba0d6d60646a30e41f6212f9 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Thu, 27 Jun 2024 13:47:22 +0200 Subject: [PATCH 36/44] fix: throw error instead of panicing on runtime checks --- Cargo.lock | 1 + core/src/preflight.rs | 12 ++++++------ lib/src/protocol_instance.rs | 29 ++++++++++++++--------------- task_manager/Cargo.toml | 1 + task_manager/src/mem_db.rs | 9 +++++---- 5 files changed, 27 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 767c368bd..2060853d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5895,6 +5895,7 @@ name = "raiko-task-manager" version = "0.1.0" dependencies = [ "alloy-primitives", + "anyhow", "chrono", "hex", "num_enum 0.7.2", diff --git a/core/src/preflight.rs b/core/src/preflight.rs index bd2ab6039..36fbcc920 100644 --- a/core/src/preflight.rs +++ b/core/src/preflight.rs @@ -7,7 +7,7 @@ pub use alloy_primitives::*; use alloy_provider::{Provider, ReqwestProvider}; use alloy_rpc_types::{Block, BlockTransactions, Filter, Transaction as AlloyRpcTransaction}; use alloy_sol_types::{SolCall, SolEvent}; -use anyhow::{anyhow, bail, Result}; +use anyhow::{anyhow, bail, ensure, Result}; use c_kzg::{Blob, KzgCommitment}; use raiko_lib::{ builder::{OptimisticDatabase, RethBlockBuilder}, @@ -276,7 +276,7 @@ async fn prepare_taiko_chain_input( debug!("blob active"); // Get the blob hashes attached to the propose tx let blob_hashes = proposal_tx.blob_versioned_hashes.unwrap_or_default(); - assert!(!blob_hashes.is_empty()); + ensure!(!blob_hashes.is_empty()); // Currently the protocol enforces the first blob hash to be used let blob_hash = blob_hashes[0]; // Get the blob data for this block @@ -305,7 +305,7 @@ async fn prepare_taiko_chain_input( Some(anchor_tx.clone()), ); // Do a sanity check using the transactions returned by the node - assert!( + ensure!( transactions.len() >= block.transactions.len(), "unexpected number of transactions" ); @@ -409,7 +409,7 @@ async fn get_blob_data_beacon( let response = reqwest::get(url.clone()).await?; if response.status().is_success() { let blobs: GetBlobsResponse = response.json().await?; - assert!(!blobs.data.is_empty(), "blob data not available anymore"); + ensure!(!blobs.data.is_empty(), "blob data not available anymore"); // Get the blob data for the blob storing the tx list let tx_blob = blobs .data @@ -419,7 +419,7 @@ async fn get_blob_data_beacon( blob_hash == calc_blob_versioned_hash(&blob.blob) }) .cloned(); - assert!(tx_blob.is_some()); + ensure!(tx_blob.is_some()); Ok(blob_to_bytes(&tx_blob.unwrap().blob)) } else { warn!( @@ -521,7 +521,7 @@ fn get_transactions_from_block(block: &Block) -> RaikoResult> { }, _ => unreachable!("Block is too old, please connect to an archive node or use a 
block that is at most 128 blocks old."), }; - assert!( + ensure!( transactions.len() == block.transactions.len(), "unexpected number of transactions" ); diff --git a/lib/src/protocol_instance.rs b/lib/src/protocol_instance.rs index 0d919da0e..2876bb08c 100644 --- a/lib/src/protocol_instance.rs +++ b/lib/src/protocol_instance.rs @@ -58,9 +58,8 @@ impl ProtocolInstance { ) .expect("Fail to calculate KZG commitment"); let versioned_hash = kzg_to_versioned_hash(&kzg_commit); - assert_eq!( - versioned_hash, - input.taiko.tx_blob_hash.unwrap(), + ensure!( + versioned_hash == input.taiko.tx_blob_hash.unwrap(), "Blob version hash not matching" ); drop(aligned_vec); @@ -80,28 +79,28 @@ impl ProtocolInstance { if let Some(verified_chain_spec) = SupportedChainSpecs::default().get_chain_spec_with_chain_id(input.chain_spec.chain_id) { - assert_eq!( - input.chain_spec.max_spec_id, verified_chain_spec.max_spec_id, + ensure!( + input.chain_spec.max_spec_id == verified_chain_spec.max_spec_id, "unexpected max_spec_id" ); - assert_eq!( - input.chain_spec.hard_forks, verified_chain_spec.hard_forks, + ensure!( + input.chain_spec.hard_forks == verified_chain_spec.hard_forks, "unexpected hard_forks" ); - assert_eq!( - input.chain_spec.eip_1559_constants, verified_chain_spec.eip_1559_constants, + ensure!( + input.chain_spec.eip_1559_constants == verified_chain_spec.eip_1559_constants, "unexpected eip_1559_constants" ); - assert_eq!( - input.chain_spec.l1_contract, verified_chain_spec.l1_contract, + ensure!( + input.chain_spec.l1_contract == verified_chain_spec.l1_contract, "unexpected l1_contract" ); - assert_eq!( - input.chain_spec.l2_contract, verified_chain_spec.l2_contract, + ensure!( + input.chain_spec.l2_contract == verified_chain_spec.l2_contract, "unexpected l2_contract" ); - assert_eq!( - input.chain_spec.is_taiko, verified_chain_spec.is_taiko, + ensure!( + input.chain_spec.is_taiko == verified_chain_spec.is_taiko, "unexpected eip_1559_constants" ); } diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index 9b411b776..8b39a07a1 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -15,6 +15,7 @@ serde = { workspace = true } serde_json = { workspace = true } hex = { workspace = true } tracing = { workspace = true } +anyhow = { workspace = true } [dev-dependencies] rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::() diff --git a/task_manager/src/mem_db.rs b/task_manager/src/mem_db.rs index 019ba2243..a86528396 100644 --- a/task_manager/src/mem_db.rs +++ b/task_manager/src/mem_db.rs @@ -14,6 +14,7 @@ use crate::{ TaskManagerResult, TaskProvingStatus, TaskProvingStatusRecords, TaskStatus, }; +use anyhow::ensure; use chrono::Utc; use raiko_core::interfaces::ProofType; use raiko_lib::primitives::{keccak::keccak, ChainId, B256}; @@ -82,7 +83,7 @@ impl InMemoryTaskDb { } .into(); let key = keccak(td_data).into(); - assert!(self.enqueue_task.contains_key(&key)); + ensure!(self.enqueue_task.contains_key(&key)); let task_proving_records = self.enqueue_task.get(&key).unwrap(); let task_status = task_proving_records.last().unwrap().0; @@ -128,7 +129,7 @@ impl InMemoryTaskDb { &mut self, task_id: u64, ) -> Result { - assert!(self.task_id_desc.contains_key(&task_id)); + ensure!(self.task_id_desc.contains_key(&task_id)); let key = self.task_id_desc.get(&task_id).unwrap(); let task_status = self.enqueue_task.get(key).unwrap(); Ok(task_status.clone()) @@ -151,7 +152,7 @@ impl InMemoryTaskDb { .to_vec(), ) .into(); - assert!(self.enqueue_task.contains_key(&key)); + 
ensure!(self.enqueue_task.contains_key(&key)); let proving_status_records = self.enqueue_task.get(&key).unwrap(); let task_status = proving_status_records.last().unwrap(); @@ -164,7 +165,7 @@ impl InMemoryTaskDb { } fn get_task_proof_by_id(&mut self, task_id: u64) -> Result, TaskManagerError> { - assert!(self.task_id_desc.contains_key(&task_id)); + ensure!(self.task_id_desc.contains_key(&task_id)); let key = self.task_id_desc.get(&task_id).unwrap(); let task_records = self.enqueue_task.get(key).unwrap(); let task_status = task_records.last().unwrap(); From 15044af3339ffcb35c9cbaadadef642d92810316 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Thu, 27 Jun 2024 14:25:29 +0200 Subject: [PATCH 37/44] fix(core,task_manager): add custom ensure and require fns --- core/src/lib.rs | 18 +++++++++++------- core/src/preflight.rs | 18 ++++++++++-------- task_manager/src/lib.rs | 15 +++++++++++++++ task_manager/src/mem_db.rs | 24 ++++++++++++------------ 4 files changed, 48 insertions(+), 27 deletions(-) diff --git a/core/src/lib.rs b/core/src/lib.rs index 26f3c63a5..7c81b461b 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -158,18 +158,22 @@ fn check_eq(expected: &T, actual: &T, let _ = black_box(require_eq(expected, actual, message)); } +fn require(expression: bool, message: &str) -> RaikoResult<()> { + if !expression { + let msg = format!("Assertion failed: {message}"); + error!("{msg}"); + return Err(anyhow::Error::msg(msg).into()); + } + Ok(()) +} + fn require_eq( expected: &T, actual: &T, message: &str, ) -> RaikoResult<()> { - if expected != actual { - let msg = - format!("Assertion failed: {message} - Expected: {expected:?}, Found: {actual:?}",); - error!("{}", msg); - return Err(anyhow::Error::msg(msg).into()); - } - Ok(()) + let msg = format!("{message} - Expected: {expected:?}, Found: {actual:?}"); + require(expected == actual, &msg) } /// Merges two json's together, overwriting `a` with the values of `b` diff --git a/core/src/preflight.rs b/core/src/preflight.rs index 36fbcc920..3ebefa042 100644 --- a/core/src/preflight.rs +++ b/core/src/preflight.rs @@ -1,6 +1,7 @@ use crate::{ interfaces::{RaikoError, RaikoResult}, provider::{db::ProviderDb, rpc::RpcBlockDataProvider, BlockDataProvider}, + require, require_eq, }; use alloy_consensus::TxEnvelope; pub use alloy_primitives::*; @@ -276,7 +277,7 @@ async fn prepare_taiko_chain_input( debug!("blob active"); // Get the blob hashes attached to the propose tx let blob_hashes = proposal_tx.blob_versioned_hashes.unwrap_or_default(); - ensure!(!blob_hashes.is_empty()); + require(!blob_hashes.is_empty(), "blob hashes are empty")?; // Currently the protocol enforces the first blob hash to be used let blob_hash = blob_hashes[0]; // Get the blob data for this block @@ -305,10 +306,10 @@ async fn prepare_taiko_chain_input( Some(anchor_tx.clone()), ); // Do a sanity check using the transactions returned by the node - ensure!( + require( transactions.len() >= block.transactions.len(), - "unexpected number of transactions" - ); + "unexpected number of transactions", + )?; // Create the input struct without the block data set Ok(TaikoGuestInput { @@ -521,10 +522,11 @@ fn get_transactions_from_block(block: &Block) -> RaikoResult> { }, _ => unreachable!("Block is too old, please connect to an archive node or use a block that is at most 128 blocks old."), }; - ensure!( - transactions.len() == block.transactions.len(), - "unexpected number of transactions" - ); + require_eq( + &transactions.len(), + &block.transactions.len(), + "unexpected number of 
transactions", + )?; } Ok(transactions) } diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index eadf99a58..6364b8bf0 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -20,6 +20,8 @@ pub enum TaskManagerError { IOError(IOErrorKind), #[error("SQL Error {0}")] SqlError(String), + #[error("Anyhow error: {0}")] + Anyhow(String), } pub type TaskManagerResult = Result; @@ -42,6 +44,12 @@ impl From for TaskManagerError { } } +impl From for TaskManagerError { + fn from(value: anyhow::Error) -> Self { + TaskManagerError::Anyhow(value.to_string()) + } +} + #[allow(non_camel_case_types)] #[rustfmt::skip] #[derive(PartialEq, Debug, Copy, Clone, IntoPrimitive, FromPrimitive, Serialize)] @@ -175,6 +183,13 @@ pub trait TaskManager { fn prune_db(&mut self) -> TaskManagerResult<()>; } +pub fn ensure(expression: bool, message: &str) -> TaskManagerResult<()> { + if !expression { + return Err(TaskManagerError::Anyhow(message.to_string())); + } + Ok(()) +} + use std::sync::{Arc, Mutex, Once}; // todo: use feature to switch between sqlite and memory db diff --git a/task_manager/src/mem_db.rs b/task_manager/src/mem_db.rs index a86528396..e869a129f 100644 --- a/task_manager/src/mem_db.rs +++ b/task_manager/src/mem_db.rs @@ -10,11 +10,10 @@ use std::collections::HashMap; use crate::{ - EnqueueTaskParams, TaskDescriptor, TaskManager, TaskManagerError, TaskManagerOpts, + ensure, EnqueueTaskParams, TaskDescriptor, TaskManager, TaskManagerError, TaskManagerOpts, TaskManagerResult, TaskProvingStatus, TaskProvingStatusRecords, TaskStatus, }; -use anyhow::ensure; use chrono::Utc; use raiko_core::interfaces::ProofType; use raiko_lib::primitives::{keccak::keccak, ChainId, B256}; @@ -74,7 +73,7 @@ impl InMemoryTaskDb { prover: Option, status: TaskStatus, proof: Option<&[u8]>, - ) { + ) -> TaskManagerResult<()> { let td_data: Vec = TaskDescriptor { chain_id, blockhash, @@ -83,7 +82,7 @@ impl InMemoryTaskDb { } .into(); let key = keccak(td_data).into(); - ensure!(self.enqueue_task.contains_key(&key)); + ensure(self.enqueue_task.contains_key(&key), "no task found")?; let task_proving_records = self.enqueue_task.get(&key).unwrap(); let task_status = task_proving_records.last().unwrap().0; @@ -99,6 +98,7 @@ impl InMemoryTaskDb { .collect(); self.enqueue_task.insert(key, new_records); } + Ok(()) } fn get_task_proving_status( @@ -107,7 +107,7 @@ impl InMemoryTaskDb { blockhash: B256, proof_system: ProofType, prover: Option, - ) -> Result { + ) -> TaskManagerResult { let key: B256 = keccak( TaskDescriptor { chain_id, @@ -128,8 +128,8 @@ impl InMemoryTaskDb { fn get_task_proving_status_by_id( &mut self, task_id: u64, - ) -> Result { - ensure!(self.task_id_desc.contains_key(&task_id)); + ) -> TaskManagerResult { + ensure(self.task_id_desc.contains_key(&task_id), "no task found")?; let key = self.task_id_desc.get(&task_id).unwrap(); let task_status = self.enqueue_task.get(key).unwrap(); Ok(task_status.clone()) @@ -141,7 +141,7 @@ impl InMemoryTaskDb { blockhash: B256, proof_system: ProofType, prover: Option, - ) -> Result, TaskManagerError> { + ) -> TaskManagerResult> { let key: B256 = keccak( TaskDescriptor { chain_id, @@ -152,7 +152,7 @@ impl InMemoryTaskDb { .to_vec(), ) .into(); - ensure!(self.enqueue_task.contains_key(&key)); + ensure(self.enqueue_task.contains_key(&key), "no task found")?; let proving_status_records = self.enqueue_task.get(&key).unwrap(); let task_status = proving_status_records.last().unwrap(); @@ -164,8 +164,8 @@ impl InMemoryTaskDb { } } - fn get_task_proof_by_id(&mut self, 
task_id: u64) -> Result, TaskManagerError> { - ensure!(self.task_id_desc.contains_key(&task_id)); + fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { + ensure(self.task_id_desc.contains_key(&task_id), "no task found")?; let key = self.task_id_desc.get(&task_id).unwrap(); let task_records = self.enqueue_task.get(key).unwrap(); let task_status = task_records.last().unwrap(); @@ -227,7 +227,7 @@ impl TaskManager for InMemoryTaskManager { proof: Option<&[u8]>, ) -> TaskManagerResult<()> { self.db - .update_task_progress(chain_id, blockhash, proof_system, prover, status, proof); + .update_task_progress(chain_id, blockhash, proof_system, prover, status, proof)?; Ok(()) } From 9e905ed2263097629ba5e5b0e4ac7f5864b35a20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petar=20Vujovi=C4=87?= Date: Tue, 2 Jul 2024 11:00:31 +0200 Subject: [PATCH 38/44] feat(task_db): sqlite and in memory abstraction (#301) * enable sqlite db by feature Signed-off-by: smtmfft * debug lifetime Signed-off-by: smtmfft * resolve lifetime issue and make all tests pass Signed-off-by: smtmfft * refactor(task_db): simplify structure for sqlite and use cached statements * feat(task_db): abstract task db implementation into wrapper * fix(task_db): add await to test call * fix(task_db): fix import declaration * fix(task_db): add async and mutable variables * fix(host): fix task manager usage * fix(task_db): fix test for async * Update Cargo.toml use in-mem as default. --------- Signed-off-by: smtmfft Co-authored-by: smtmfft Co-authored-by: smtmfft <99081233+smtmfft@users.noreply.github.com> --- Cargo.lock | 2 + Cargo.toml | 1 + host/src/lib.rs | 93 +++--- host/src/server/api/v2/proof.rs | 47 +-- task_manager/Cargo.toml | 8 +- task_manager/src/adv_sqlite.rs | 557 ++++++++++++++++---------------- task_manager/src/lib.rs | 201 ++++++++++-- task_manager/src/mem_db.rs | 83 +++-- task_manager/tests/main.rs | 46 ++- 9 files changed, 606 insertions(+), 432 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2060853d6..05d6a2d82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5896,6 +5896,7 @@ version = "0.1.0" dependencies = [ "alloy-primitives", "anyhow", + "async-trait", "chrono", "hex", "num_enum 0.7.2", @@ -5908,6 +5909,7 @@ dependencies = [ "serde_json", "tempfile", "thiserror", + "tokio", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index 0a52a5011..ba69468ab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -134,6 +134,7 @@ tokio = { version = "^1.23", features = ["full"] } hyper = { version = "0.14.27", features = ["server"] } reqwest = { version = "0.11.22", features = ["json"] } url = "2.5.0" +async-trait = "0.1.80" # crypto c-kzg = { git = "https://github.com/brechtpd/c-kzg-4844", branch = "for-alpha7", default-features = false, features = [ diff --git a/host/src/lib.rs b/host/src/lib.rs index ac94ee2d9..bc3672c76 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -180,16 +180,17 @@ impl ProverState { }; let proof_result: HostResult = async move { { - let manager_binding = get_task_manager(task_manager_opts); - let mut manager = manager_binding.lock().unwrap(); - manager.update_task_progress( - chain_id, - blockhash, - proof_request.proof_type, - Some(proof_request.prover.to_string()), - TaskStatus::WorkInProgress, - None, - )?; + let mut manager = get_task_manager(task_manager_opts); + manager + .update_task_progress( + chain_id, + blockhash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + TaskStatus::WorkInProgress, + None, + ) + .await?; } handle_proof(&proof_request_clone, 
&opts_clone, &chain_specs_clone).await } @@ -199,47 +200,49 @@ impl ProverState { let _: HostResult<()> = async move { let proof = proof.proof.unwrap(); let proof = proof.as_bytes(); - let manager_binding = get_task_manager(task_manager_opts); - let mut manager = manager_binding.lock().unwrap(); - manager.update_task_progress( - chain_id, - blockhash, - proof_request.proof_type, - Some(proof_request.prover.to_string()), - TaskStatus::WorkInProgress, - Some(proof), - )?; + let mut manager = get_task_manager(task_manager_opts); + manager + .update_task_progress( + chain_id, + blockhash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + TaskStatus::WorkInProgress, + Some(proof), + ) + .await?; Ok(()) } .await; } Err(error) => { let _: HostResult<()> = async move { - let manager_binding = get_task_manager(task_manager_opts); - let mut manager = manager_binding.lock().unwrap(); - manager.update_task_progress( - chain_id, - blockhash, - proof_request.proof_type, - Some(proof_request.prover.to_string()), - match error { - HostError::HandleDropped - | HostError::CapacityFull - | HostError::JoinHandle(_) - | HostError::InvalidAddress(_) - | HostError::InvalidRequestConfig(_) => unreachable!(), - HostError::Conversion(_) - | HostError::Serde(_) - | HostError::Core(_) - | HostError::Anyhow(_) - | HostError::FeatureNotSupportedError(_) - | HostError::Io(_) => TaskStatus::UnspecifiedFailureReason, - HostError::RPC(_) => TaskStatus::NetworkFailure, - HostError::Guest(_) => TaskStatus::ProofFailure_Generic, - HostError::TaskManager(_) => TaskStatus::SqlDbCorruption, - }, - None, - )?; + let mut manager = get_task_manager(task_manager_opts); + manager + .update_task_progress( + chain_id, + blockhash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + match error { + HostError::HandleDropped + | HostError::CapacityFull + | HostError::JoinHandle(_) + | HostError::InvalidAddress(_) + | HostError::InvalidRequestConfig(_) => unreachable!(), + HostError::Conversion(_) + | HostError::Serde(_) + | HostError::Core(_) + | HostError::Anyhow(_) + | HostError::FeatureNotSupportedError(_) + | HostError::Io(_) => TaskStatus::UnspecifiedFailureReason, + HostError::RPC(_) => TaskStatus::NetworkFailure, + HostError::Guest(_) => TaskStatus::ProofFailure_Generic, + HostError::TaskManager(_) => TaskStatus::SqlDbCorruption, + }, + None, + ) + .await?; Ok(()) } .await; diff --git a/host/src/server/api/v2/proof.rs b/host/src/server/api/v2/proof.rs index bd864c438..2fbe16a73 100644 --- a/host/src/server/api/v2/proof.rs +++ b/host/src/server/api/v2/proof.rs @@ -58,17 +58,18 @@ async fn proof_handler( ) .await?; - let manager_binding = get_task_manager(&TaskManagerOpts { + let mut manager = get_task_manager(&TaskManagerOpts { sqlite_file: prover_state.opts.sqlite_file.clone(), max_db_size: prover_state.opts.max_db_size, }); - let mut manager = manager_binding.lock().unwrap(); - let status = manager.get_task_proving_status( - chain_id, - block_hash, - proof_request.proof_type, - Some(proof_request.prover.to_string()), - )?; + let status = manager + .get_task_proving_status( + chain_id, + block_hash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + ) + .await?; if status.is_empty() { prover_state.task_channel.try_send(( @@ -77,25 +78,29 @@ async fn proof_handler( prover_state.chain_specs, ))?; - manager.enqueue_task(&EnqueueTaskParams { - chain_id, - blockhash: block_hash, - proof_system: proof_request.proof_type, - prover: proof_request.prover.to_string(), - 
block_number: proof_request.block_number, - })?; + manager + .enqueue_task(&EnqueueTaskParams { + chain_id, + blockhash: block_hash, + proof_type: proof_request.proof_type, + prover: proof_request.prover.to_string(), + block_number: proof_request.block_number, + }) + .await?; return Ok(Json(serde_json::json!("{}"))); } let status = status.first().unwrap().0; if matches!(status, TaskStatus::Success) { - let proof = manager.get_task_proof( - chain_id, - block_hash, - proof_request.proof_type, - Some(proof_request.prover.to_string()), - )?; + let proof = manager + .get_task_proof( + chain_id, + block_hash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + ) + .await?; let response = ProofResponse { proof: Some(String::from_utf8(proof).unwrap()), diff --git a/task_manager/Cargo.toml b/task_manager/Cargo.toml index 8b39a07a1..ec888c35e 100644 --- a/task_manager/Cargo.toml +++ b/task_manager/Cargo.toml @@ -16,15 +16,21 @@ serde_json = { workspace = true } hex = { workspace = true } tracing = { workspace = true } anyhow = { workspace = true } +tokio = { workspace = true } +async-trait = { workspace = true } [dev-dependencies] rand = "0.9.0-alpha.1" # This is an alpha version, that has rng.gen_iter::() rand_chacha = "0.9.0-alpha.1" tempfile = "3.10.1" alloy-primitives = { workspace = true, features = ["getrandom"] } - rusqlite = { workspace = true, features = ["trace"] } +[features] +default = ["in-memory"] +sqlite = [] +in-memory = [] + [[test]] name = "task_manager_tests" path = "tests/main.rs" diff --git a/task_manager/src/adv_sqlite.rs b/task_manager/src/adv_sqlite.rs index 672c7cec9..25cd215a4 100644 --- a/task_manager/src/adv_sqlite.rs +++ b/task_manager/src/adv_sqlite.rs @@ -154,113 +154,33 @@ // ---------------------------------------------------------------- use std::{ fs::File, - io::{Error as IOError, ErrorKind as IOErrorKind}, path::Path, + sync::{Arc, Once}, }; use chrono::{DateTime, Utc}; -use num_enum::{FromPrimitive, IntoPrimitive}; -use raiko_core::interfaces::{ProofRequest, ProofType}; +use raiko_core::interfaces::ProofType; use raiko_lib::primitives::{ChainId, B256}; use rusqlite::{ - Error as SqlError, {named_params, Statement}, {Connection, OpenFlags}, + named_params, {Connection, OpenFlags}, +}; +use tokio::sync::Mutex; + +use crate::{ + EnqueueTaskParams, TaskManager, TaskManagerError, TaskManagerOpts, TaskManagerResult, + TaskProvingStatus, TaskProvingStatusRecords, TaskStatus, }; -use serde::Serialize; // Types // ---------------------------------------------------------------- -#[derive(Debug, thiserror::Error)] -pub enum TaskManagerError { - #[error("IO Error {0}")] - IOError(IOErrorKind), - #[error("SQL Error {0}")] - SqlError(String), - #[error("Serde Error {0}")] - SerdeError(#[from] serde_json::error::Error), -} - -pub type TaskManagerResult = Result; - -impl From for TaskManagerError { - fn from(error: IOError) -> TaskManagerError { - TaskManagerError::IOError(error.kind()) - } -} - -impl From for TaskManagerError { - fn from(error: SqlError) -> TaskManagerError { - TaskManagerError::SqlError(error.to_string()) - } -} - #[derive(Debug)] pub struct TaskDb { conn: Connection, } -#[derive(Debug)] -pub struct TaskManager<'db> { - enqueue_task: Statement<'db>, - update_task_progress: Statement<'db>, - get_task_proof: Statement<'db>, - get_task_proof_by_id: Statement<'db>, - get_task_proving_status: Statement<'db>, - get_task_proving_status_by_id: Statement<'db>, - #[allow(dead_code)] - get_tasks_unfinished: Statement<'db>, - get_db_size: 
Statement<'db>, -} - -#[derive(Debug, Copy, Clone)] -pub enum TaskProofsys { - Native = 0, - Risc0 = 1, - SP1 = 2, - SGX = 3, -} - -impl From for TaskProofsys { - fn from(value: ProofType) -> Self { - match value { - ProofType::Sp1 => Self::SP1, - ProofType::Sgx => Self::SGX, - ProofType::Risc0 => Self::Risc0, - ProofType::Native => Self::Native, - } - } -} - -impl From for ProofType { - fn from(val: TaskProofsys) -> Self { - match val { - TaskProofsys::Native => ProofType::Native, - TaskProofsys::Risc0 => ProofType::Risc0, - TaskProofsys::SP1 => ProofType::Sp1, - TaskProofsys::SGX => ProofType::Sgx, - } - } -} - -#[allow(non_camel_case_types)] -#[rustfmt::skip] -#[derive(PartialEq, Debug, Copy, Clone, IntoPrimitive, FromPrimitive, Serialize)] -#[repr(i32)] -pub enum TaskStatus { - Success = 0, - Registered = 1000, - WorkInProgress = 2000, - ProofFailure_Generic = -1000, - ProofFailure_OutOfMemory = -1100, - NetworkFailure = -2000, - Cancelled = -3000, - Cancelled_NeverStarted = -3100, - Cancelled_Aborted = -3200, - CancellationInProgress = -3210, - InvalidOrUnsupportedBlock = -4000, - UnspecifiedFailureReason = -9999, - #[num_enum(default)] - SqlDbCorruption = -99999, +pub struct SqliteTaskManager { + arc_task_db: Arc>, } // Implementation @@ -370,7 +290,7 @@ impl TaskDb { chain_id INTEGER NOT NULL, blockhash BLOB NOT NULL, proofsys_id INTEGER NOT NULL, - request BLOB, + prover TEXT NOT NULL, FOREIGN KEY(proofsys_id) REFERENCES proofsys(id), UNIQUE (chain_id, blockhash, proofsys_id) ); @@ -406,7 +326,7 @@ impl TaskDb { t.chain_id, t.blockhash, t.proofsys_id, - t.request + t.prover FROM tasks t LEFT JOIN task_status ts on ts.task_id = t.id; @@ -417,6 +337,7 @@ impl TaskDb { t.chain_id, t.blockhash, t.proofsys_id, + t.prover, ts.status_id, tpf.proof FROM @@ -433,11 +354,12 @@ impl TaskDb { /// for example: /// db.set_tracer(Some(|stmt| println!("sqlite:\n-------\n{}\n=======", stmt))); #[cfg(test)] + #[allow(dead_code)] pub fn set_tracer(&mut self, trace_fn: Option) { self.conn.trace(trace_fn); } - pub fn manage(&self) -> TaskManagerResult> { + pub fn manage(&self) -> TaskManagerResult<()> { // To update all the tables with the task_id assigned by Sqlite // we require row IDs for the tasks table // and we use last_insert_rowid() which is not reentrant and need a transaction lock @@ -468,25 +390,23 @@ impl TaskDb { // otherwise they can't access the temporary table: // 6. https://sqlite.org/forum/info/4f998eeec510bceee69404541e5c9ca0a301868d59ec7c3486ecb8084309bba1 // "Triggers in any schema other than temp may only access objects in their own schema. However, triggers in temp may access any object by name, even cross-schema." 
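One practical consequence of the temp-schema rule quoted above: everything `manage()` creates is per-connection state, and with the connection now cached and shared behind a mutex, `manage()` may end up running more than once on the same connection. The `IF NOT EXISTS` guards added below make that safe. A usage sketch under those assumptions (illustrative, not code from the patch; `path` is any SQLite file path):

    let db = TaskDb::open_or_create(&path)?;
    db.manage()?; // creates temp.current_task and the INSTEAD OF triggers
    db.manage()?; // now a no-op instead of a "table ... already exists" error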
- - let conn = &self.conn; - conn.execute_batch( + self.conn.execute_batch( r#" -- PRAGMA temp_store = 'MEMORY'; - CREATE TEMPORARY TABLE temp.current_task(task_id INTEGER); + CREATE TEMPORARY TABLE IF NOT EXISTS temp.current_task(task_id INTEGER); - CREATE TEMPORARY TRIGGER enqueue_task_insert_trigger INSTEAD OF + CREATE TEMPORARY TRIGGER IF NOT EXISTS enqueue_task_insert_trigger INSTEAD OF INSERT ON enqueue_task BEGIN INSERT INTO - tasks(chain_id, blockhash, proofsys_id, request) + tasks(chain_id, blockhash, proofsys_id, prover) VALUES ( new.chain_id, new.blockhash, new.proofsys_id, - new.request + new.prover ); INSERT INTO @@ -514,7 +434,7 @@ impl TaskDb { current_task; END; - CREATE TEMPORARY TRIGGER update_task_progress_trigger INSTEAD OF + CREATE TEMPORARY TRIGGER IF NOT EXISTS update_task_progress_trigger INSTEAD OF INSERT ON update_task_progress BEGIN @@ -560,33 +480,69 @@ impl TaskDb { "#, )?; - let enqueue_task = conn.prepare( - " + Ok(()) + } + + pub fn enqueue_task( + &self, + EnqueueTaskParams { + chain_id, + blockhash, + proof_type, + prover, + .. + }: &EnqueueTaskParams, + ) -> TaskManagerResult> { + let mut statement = self.conn.prepare_cached( + r#" INSERT INTO enqueue_task( chain_id, blockhash, proofsys_id, - request + prover ) VALUES ( :chain_id, :blockhash, :proofsys_id, - :request + :prover ); - ", + "#, )?; + statement.execute(named_params! { + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": *proof_type as u8, + ":prover": prover, + })?; - let update_task_progress = conn.prepare( - " + Ok(vec![TaskProvingStatus( + TaskStatus::Registered, + Some(prover.clone()), + Utc::now(), + )]) + } + + pub fn update_task_progress( + &self, + chain_id: ChainId, + blockhash: B256, + proof_type: ProofType, + prover: Option, + status: TaskStatus, + proof: Option<&[u8]>, + ) -> TaskManagerResult<()> { + let mut statement = self.conn.prepare_cached( + r#" INSERT INTO update_task_progress( chain_id, blockhash, proofsys_id, status_id, + prover, proof ) VALUES @@ -595,32 +551,111 @@ impl TaskDb { :blockhash, :proofsys_id, :status_id, + :prover, :proof ); - ", + "#, )?; + statement.execute(named_params! { + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": proof_type as u8, + ":status_id": status as i32, + ":prover": prover.unwrap_or_default(), + ":proof": proof + })?; - // The requires sqlite to be compiled with dbstat support: - // https://www.sqlite.org/dbstat.html - // which is the case for rusqlite - // https://github.com/rusqlite/rusqlite/blob/v0.31.0/libsqlite3-sys/build.rs#L126 - // but may not be the case for system-wide sqlite when debugging. - let get_db_size = conn.prepare( - " + Ok(()) + } + + pub fn get_task_proving_status( + &self, + chain_id: ChainId, + blockhash: B256, + proof_type: ProofType, + prover: Option, + ) -> TaskManagerResult { + let mut statement = self.conn.prepare_cached( + r#" SELECT - name as table_name, - SUM(pgsize) as table_size + ts.status_id, + t.prover, + timestamp FROM - dbstat - GROUP BY - table_name + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + t.chain_id = :chain_id + AND t.blockhash = :blockhash + AND t.proofsys_id = :proofsys_id + AND t.prover = :prover ORDER BY - SUM(pgsize) DESC; - ", + ts.timestamp; + "#, + )?; + let query = statement.query_map( + named_params! 
{ + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": proof_type as u8, + ":prover": prover.unwrap_or_default(), + }, + |row| { + Ok(TaskProvingStatus( + TaskStatus::from(row.get::<_, i32>(0)?), + Some(row.get::<_, String>(1)?), + row.get::<_, DateTime>(2)?, + )) + }, )?; - let get_task_proof = conn.prepare( - " + Ok(query.collect::, _>>()?) + } + + pub fn get_task_proving_status_by_id( + &self, + task_id: u64, + ) -> TaskManagerResult { + let mut statement = self.conn.prepare_cached( + r#" + SELECT + ts.status_id, + t.prover, + timestamp + FROM + task_status ts + LEFT JOIN tasks t ON ts.task_id = t.id + WHERE + t.id = :task_id + ORDER BY + ts.timestamp; + "#, + )?; + let query = statement.query_map( + named_params! { + ":task_id": task_id, + }, + |row| { + Ok(TaskProvingStatus( + TaskStatus::from(row.get::<_, i32>(0)?), + Some(row.get::<_, String>(1)?), + row.get::<_, DateTime>(2)?, + )) + }, + )?; + + Ok(query.collect::, _>>()?) + } + + pub fn get_task_proof( + &self, + chain_id: ChainId, + blockhash: B256, + proof_type: ProofType, + prover: Option, + ) -> TaskManagerResult> { + let mut statement = self.conn.prepare_cached( + r#" SELECT proof FROM @@ -628,228 +663,176 @@ impl TaskDb { LEFT JOIN tasks t ON tp.task_id = t.id WHERE t.chain_id = :chain_id + AND t.prover = :prover AND t.blockhash = :blockhash AND t.proofsys_id = :proofsys_id LIMIT 1; - ", + "#, + )?; + let query = statement.query_row( + named_params! { + ":chain_id": chain_id, + ":blockhash": blockhash.to_vec(), + ":proofsys_id": proof_type as u8, + ":prover": prover.unwrap_or_default(), + }, + |row| row.get(0), )?; - let get_task_proof_by_id = conn.prepare( - " + Ok(query) + } + + pub fn get_task_proof_by_id(&self, task_id: u64) -> TaskManagerResult> { + let mut statement = self.conn.prepare_cached( + r#" SELECT proof FROM task_proofs tp LEFT JOIN tasks t ON tp.task_id = t.id WHERE - t.id= :task_id + t.id = :task_id LIMIT 1; - ", + "#, )?; - - let get_task_proving_status = conn.prepare( - " - SELECT - ts.status_id, - timestamp - FROM - task_status ts - LEFT JOIN tasks t ON ts.task_id = t.id - WHERE - t.chain_id = :chain_id - AND t.blockhash = :blockhash - AND t.proofsys_id = :proofsys_id - ORDER BY - ts.timestamp; - ", + let query = statement.query_row( + named_params! { + ":task_id": task_id, + }, + |row| row.get(0), )?; - let get_task_proving_status_by_id = conn.prepare( - " + Ok(query) + } + + pub fn get_db_size(&self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { + let mut statement = self.conn.prepare_cached( + r#" SELECT - ts.status_id, - timestamp + name as table_name, + SUM(pgsize) as table_size FROM - task_status ts - LEFT JOIN tasks t ON ts.task_id = t.id - WHERE - t.id = :task_id + dbstat + GROUP BY + table_name ORDER BY - ts.timestamp; - ", + SUM(pgsize) DESC; + "#, )?; + let query = statement.query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; + let details = query.collect::, _>>()?; + let total = details.iter().fold(0, |acc, (_, size)| acc + size); - let get_tasks_unfinished = conn.prepare( - " - SELECT - t.chain_id, - t.blockhash, - t.proofsys_id, - ts.status_id, - timestamp - FROM - task_status ts - LEFT JOIN tasks t ON ts.task_id = t.id - WHERE - status_id NOT IN ( - 0, -- Success - -3000, -- Cancelled - -3100, -- Cancelled (never started) - -3200 -- Cancelled (aborted) - -- What do we do with -4000 Invalid/unsupported blocks? - -- And -9999 Unspecified failure reason? 
-                    -- For now we return them until we know more of the failure modes
-                );
-            ",
+        Ok((total, details))
+    }
+
+    pub fn prune_db(&self) -> TaskManagerResult<()> {
+        let mut statement = self.conn.prepare_cached(
+            r#"
+            DELETE FROM
+                tasks;
+
+            DELETE FROM
+                task_proofs;
+
+            DELETE FROM
+                task_status;
+            "#,
         )?;
+        statement.execute([])?;

-        Ok(TaskManager {
-            enqueue_task,
-            update_task_progress,
-            get_task_proof,
-            get_task_proof_by_id,
-            get_task_proving_status,
-            get_task_proving_status_by_id,
-            get_tasks_unfinished,
-            get_db_size,
-        })
+        Ok(())
     }
 }

-#[derive(Debug, Clone)]
-pub struct EnqueueTaskParams {
-    pub chain_id: ChainId,
-    pub blockhash: B256,
-    pub proof_system: TaskProofsys,
-    pub submitter: String,
-    pub parent_hash: B256,
-    pub state_root: B256,
-    pub num_transactions: u64,
-    pub gas_used: u64,
-    pub payload: Vec<u8>,
-}
-
-pub type TaskProvingStatus = Vec<(TaskStatus, DateTime<Utc>)>;
+#[async_trait::async_trait]
+impl TaskManager for SqliteTaskManager {
+    fn new(opts: &TaskManagerOpts) -> Self {
+        static INIT: Once = Once::new();
+        static mut CONN: Option<Arc<Mutex<TaskDb>>> = None;
+        INIT.call_once(|| {
+            unsafe {
+                CONN = Some(Arc::new(Mutex::new({
+                    let db = TaskDb::open_or_create(&opts.sqlite_file).unwrap();
+                    db.manage().unwrap();
+                    db
+                })))
+            };
+        });
+        Self {
+            arc_task_db: unsafe { CONN.clone().unwrap() },
+        }
+    }

-impl<'db> TaskManager<'db> {
-    pub fn enqueue_task(
+    async fn enqueue_task(
         &mut self,
-        chain_id: u64,
-        blockhash: B256,
-        request: &ProofRequest,
-    ) -> TaskManagerResult<()> {
-        self.enqueue_task.execute(named_params! {
-            ":chain_id": chain_id,
-            ":blockhash": blockhash.to_vec(),
-            ":proofsys_id": TaskProofsys::from(request.proof_type) as u8,
-            ":request": serde_json::to_vec(&request)?,
-        })?;
-
-        Ok(())
+        params: &EnqueueTaskParams,
+    ) -> Result<Vec<TaskProvingStatus>, TaskManagerError> {
+        let task_db = self.arc_task_db.lock().await;
+        task_db.enqueue_task(params)
     }

-    pub fn update_task_progress(
+    async fn update_task_progress(
         &mut self,
         chain_id: ChainId,
         blockhash: B256,
         proof_type: ProofType,
+        prover: Option<String>,
         status: TaskStatus,
         proof: Option<&[u8]>,
     ) -> TaskManagerResult<()> {
-        self.update_task_progress.execute(named_params! {
-            ":chain_id": chain_id,
-            ":blockhash": blockhash.to_vec(),
-            ":proofsys_id": TaskProofsys::from(proof_type) as u8,
-            ":status_id": status as i32,
-            ":proof": proof
-        })?;
-        Ok(())
+        let task_db = self.arc_task_db.lock().await;
+        task_db.update_task_progress(chain_id, blockhash, proof_type, prover, status, proof)
     }

     /// Returns the latest triplet (submitter or fulfiller, status, last update time)
-    pub fn get_task_proving_status(
+    async fn get_task_proving_status(
         &mut self,
         chain_id: ChainId,
         blockhash: B256,
         proof_type: ProofType,
-    ) -> TaskManagerResult<TaskProvingStatus> {
-        let rows = self.get_task_proving_status.query_map(
-            named_params! {
-                ":chain_id": chain_id,
-                ":blockhash": blockhash.to_vec(),
-                ":proofsys_id": TaskProofsys::from(proof_type) as u8,
-            },
-            |row| {
-                Ok((
-                    TaskStatus::from(row.get::<_, i32>(0)?),
-                    row.get::<_, DateTime<Utc>>(1)?,
-                ))
-            },
-        )?;
-
-        Ok(rows.collect::<Result<Vec<_>, _>>()?)
+        prover: Option<String>,
+    ) -> TaskManagerResult<TaskProvingStatusRecords> {
+        let task_db = self.arc_task_db.lock().await;
+        task_db.get_task_proving_status(chain_id, blockhash, proof_type, prover)
     }

     /// Returns the latest triplet (submitter or fulfiller, status, last update time)
-    pub fn get_task_proving_status_by_id(
+    async fn get_task_proving_status_by_id(
         &mut self,
         task_id: u64,
-    ) -> TaskManagerResult<TaskProvingStatus> {
-        let rows = self.get_task_proving_status_by_id.query_map(
-            named_params!
{ - ":task_id": task_id, - }, - |row| { - Ok(( - TaskStatus::from(row.get::<_, i32>(0)?), - row.get::<_, DateTime>(1)?, - )) - }, - )?; - let proving_status = rows.collect::, _>>()?; - - Ok(proving_status) + ) -> TaskManagerResult { + let task_db = self.arc_task_db.lock().await; + task_db.get_task_proving_status_by_id(task_id) } - pub fn get_task_proof( + async fn get_task_proof( &mut self, chain_id: ChainId, blockhash: B256, proof_type: ProofType, + prover: Option, ) -> TaskManagerResult> { - let proof = self.get_task_proof.query_row( - named_params! { - ":chain_id": chain_id, - ":blockhash": blockhash.to_vec(), - ":proofsys_id": TaskProofsys::from(proof_type) as u8, - }, - |r| r.get(0), - )?; - - Ok(proof) + let task_db = self.arc_task_db.lock().await; + task_db.get_task_proof(chain_id, blockhash, proof_type, prover) } - pub fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { - let proof = self.get_task_proof_by_id.query_row( - named_params! { - ":task_id": task_id, - }, - |r| r.get(0), - )?; - - Ok(proof) + async fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { + let task_db = self.arc_task_db.lock().await; + task_db.get_task_proof_by_id(task_id) } /// Returns the total and detailed database size - pub fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { - let rows = self - .get_db_size - .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))?; - let details = rows.collect::, _>>()?; - let total = details.iter().fold(0, |acc, item| acc + item.1); - Ok((total, details)) + async fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { + let task_db = self.arc_task_db.lock().await; + task_db.get_db_size() + } + + async fn prune_db(&mut self) -> TaskManagerResult<()> { + let task_db = self.arc_task_db.lock().await; + task_db.prune_db() } } @@ -875,6 +858,7 @@ mod tests { let _db = TaskDb::create(&file).unwrap(); assert!(TaskDb::open(&file).is_err()); + std::fs::remove_file(&file).unwrap(); } #[test] @@ -884,5 +868,6 @@ mod tests { let _db = TaskDb::create(&file).unwrap(); assert!(TaskDb::create(&file).is_err()); + std::fs::remove_file(&file).unwrap(); } } diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 6364b8bf0..5f3cb8adb 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -1,15 +1,18 @@ -use std::io::{Error as IOError, ErrorKind as IOErrorKind}; -use std::path::PathBuf; +use std::{ + io::{Error as IOError, ErrorKind as IOErrorKind}, + path::PathBuf, +}; use chrono::{DateTime, Utc}; -use mem_db::InMemoryTaskManager; use num_enum::{FromPrimitive, IntoPrimitive}; use raiko_core::interfaces::ProofType; use raiko_lib::primitives::{ChainId, B256}; use rusqlite::Error as SqlError; use serde::Serialize; -// mod adv_sqlite; +use crate::{adv_sqlite::SqliteTaskManager, mem_db::InMemoryTaskManager}; + +mod adv_sqlite; mod mem_db; // Types @@ -58,7 +61,6 @@ pub enum TaskStatus { Success = 0, Registered = 1000, WorkInProgress = 2000, - WorkReported = 3000, ProofFailure_Generic = -1000, ProofFailure_OutOfMemory = -1100, NetworkFailure = -2000, @@ -76,7 +78,7 @@ pub enum TaskStatus { pub struct EnqueueTaskParams { pub chain_id: ChainId, pub blockhash: B256, - pub proof_system: ProofType, + pub proof_type: ProofType, pub prover: String, pub block_number: u64, } @@ -112,7 +114,7 @@ impl From<&EnqueueTaskParams> for TaskDescriptor { TaskDescriptor { chain_id: params.chain_id, blockhash: params.blockhash, - proof_system: params.proof_system, + proof_system: params.proof_type, 
prover: params.prover.clone(), } } @@ -129,18 +131,19 @@ pub struct TaskManagerOpts { pub max_db_size: usize, } +#[async_trait::async_trait] pub trait TaskManager { /// new a task manager fn new(opts: &TaskManagerOpts) -> Self; /// enqueue_task - fn enqueue_task( + async fn enqueue_task( &mut self, request: &EnqueueTaskParams, ) -> TaskManagerResult; /// Update the task progress - fn update_task_progress( + async fn update_task_progress( &mut self, chain_id: ChainId, blockhash: B256, @@ -151,7 +154,7 @@ pub trait TaskManager { ) -> TaskManagerResult<()>; /// Returns the latest triplet (submitter or fulfiller, status, last update time) - fn get_task_proving_status( + async fn get_task_proving_status( &mut self, chain_id: ChainId, blockhash: B256, @@ -160,13 +163,13 @@ pub trait TaskManager { ) -> TaskManagerResult; /// Returns the latest triplet (submitter or fulfiller, status, last update time) - fn get_task_proving_status_by_id( + async fn get_task_proving_status_by_id( &mut self, task_id: u64, ) -> TaskManagerResult; /// Returns the proof for the given task - fn get_task_proof( + async fn get_task_proof( &mut self, chain_id: ChainId, blockhash: B256, @@ -174,13 +177,13 @@ pub trait TaskManager { prover: Option, ) -> TaskManagerResult>; - fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult>; + async fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult>; /// Returns the total and detailed database size - fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)>; + async fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)>; /// Prune old tasks - fn prune_db(&mut self) -> TaskManagerResult<()>; + async fn prune_db(&mut self) -> TaskManagerResult<()>; } pub fn ensure(expression: bool, message: &str) -> TaskManagerResult<()> { @@ -190,47 +193,175 @@ pub fn ensure(expression: bool, message: &str) -> TaskManagerResult<()> { Ok(()) } -use std::sync::{Arc, Mutex, Once}; +enum TaskManagerInstance { + InMemory(InMemoryTaskManager), + Sqlite(SqliteTaskManager), +} + +pub struct TaskManagerWrapper { + manager: TaskManagerInstance, +} + +#[async_trait::async_trait] +impl TaskManager for TaskManagerWrapper { + fn new(opts: &TaskManagerOpts) -> Self { + let manager = if cfg!(feature = "sqlite") { + TaskManagerInstance::Sqlite(SqliteTaskManager::new(opts)) + } else { + TaskManagerInstance::InMemory(InMemoryTaskManager::new(opts)) + }; -// todo: use feature to switch between sqlite and memory db -pub fn get_task_manager(opts: &TaskManagerOpts) -> Arc> { - static INIT: Once = Once::new(); - static mut SHARED_TASK_MANAGER: Option>> = None; + Self { manager } + } - INIT.call_once(|| { - let task_manager: Arc> = - Arc::new(Mutex::new(InMemoryTaskManager::new(opts))); - unsafe { - SHARED_TASK_MANAGER = Some(Arc::clone(&task_manager)); + async fn enqueue_task( + &mut self, + request: &EnqueueTaskParams, + ) -> TaskManagerResult { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => manager.enqueue_task(request).await, + TaskManagerInstance::Sqlite(ref mut manager) => manager.enqueue_task(request).await, } - }); + } - unsafe { SHARED_TASK_MANAGER.as_ref().unwrap().clone() } + async fn update_task_progress( + &mut self, + chain_id: ChainId, + blockhash: B256, + proof_system: ProofType, + prover: Option, + status: TaskStatus, + proof: Option<&[u8]>, + ) -> TaskManagerResult<()> { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => { + manager + .update_task_progress(chain_id, 
blockhash, proof_system, prover, status, proof) + .await + } + TaskManagerInstance::Sqlite(ref mut manager) => { + manager + .update_task_progress(chain_id, blockhash, proof_system, prover, status, proof) + .await + } + } + } + + async fn get_task_proving_status( + &mut self, + chain_id: ChainId, + blockhash: B256, + proof_system: ProofType, + prover: Option, + ) -> TaskManagerResult { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => { + manager + .get_task_proving_status(chain_id, blockhash, proof_system, prover) + .await + } + TaskManagerInstance::Sqlite(ref mut manager) => { + manager + .get_task_proving_status(chain_id, blockhash, proof_system, prover) + .await + } + } + } + + async fn get_task_proving_status_by_id( + &mut self, + task_id: u64, + ) -> TaskManagerResult { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => { + manager.get_task_proving_status_by_id(task_id).await + } + TaskManagerInstance::Sqlite(ref mut manager) => { + manager.get_task_proving_status_by_id(task_id).await + } + } + } + + async fn get_task_proof( + &mut self, + chain_id: ChainId, + blockhash: B256, + proof_system: ProofType, + prover: Option, + ) -> TaskManagerResult> { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => { + manager + .get_task_proof(chain_id, blockhash, proof_system, prover) + .await + } + TaskManagerInstance::Sqlite(ref mut manager) => { + manager + .get_task_proof(chain_id, blockhash, proof_system, prover) + .await + } + } + } + + async fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => { + manager.get_task_proof_by_id(task_id).await + } + TaskManagerInstance::Sqlite(ref mut manager) => { + manager.get_task_proof_by_id(task_id).await + } + } + } + + async fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => manager.get_db_size().await, + TaskManagerInstance::Sqlite(ref mut manager) => manager.get_db_size().await, + } + } + + async fn prune_db(&mut self) -> TaskManagerResult<()> { + match &mut self.manager { + TaskManagerInstance::InMemory(ref mut manager) => manager.prune_db().await, + TaskManagerInstance::Sqlite(ref mut manager) => manager.prune_db().await, + } + } +} + +pub fn get_task_manager(opts: &TaskManagerOpts) -> TaskManagerWrapper { + TaskManagerWrapper::new(opts) } #[cfg(test)] mod test { use super::*; + use std::path::Path; + + #[tokio::test] + async fn test_new_taskmanager() { + let sqlite_file: &Path = Path::new("test.db"); + // remove existed one + if sqlite_file.exists() { + std::fs::remove_file(sqlite_file).unwrap(); + } - #[test] - fn test_new_taskmanager() { let opts = TaskManagerOpts { - sqlite_file: "test.db".to_string().into(), - max_db_size: 1024, + sqlite_file: sqlite_file.to_path_buf(), + max_db_size: 1024 * 1024, }; - let binding = get_task_manager(&opts); - let mut task_manager = binding.lock().unwrap(); - assert_eq!(task_manager.get_db_size().unwrap().0, 0); + let mut task_manager = get_task_manager(&opts); assert_eq!( task_manager .enqueue_task(&EnqueueTaskParams { chain_id: 1, blockhash: B256::default(), - proof_system: ProofType::Native, + proof_type: ProofType::Native, prover: "test".to_string(), block_number: 1 }) + .await .unwrap() .len(), 1 diff --git a/task_manager/src/mem_db.rs b/task_manager/src/mem_db.rs index e869a129f..4b3287287 100644 --- 
a/task_manager/src/mem_db.rs +++ b/task_manager/src/mem_db.rs @@ -7,7 +7,10 @@ // Imports // ---------------------------------------------------------------- -use std::collections::HashMap; +use std::{ + collections::HashMap, + sync::{Arc, Once}, +}; use crate::{ ensure, EnqueueTaskParams, TaskDescriptor, TaskManager, TaskManagerError, TaskManagerOpts, @@ -17,11 +20,12 @@ use crate::{ use chrono::Utc; use raiko_core::interfaces::ProofType; use raiko_lib::primitives::{keccak::keccak, ChainId, B256}; +use tokio::sync::Mutex; use tracing::{debug, info}; #[derive(Debug)] pub struct InMemoryTaskManager { - db: InMemoryTaskDb, + db: Arc>, } #[derive(Debug)] @@ -181,43 +185,55 @@ impl InMemoryTaskDb { Ok((self.enqueue_task.len() + self.task_id_desc.len(), vec![])) } - #[allow(dead_code)] - fn prune(&mut self) { - todo!() + fn prune(&mut self) -> TaskManagerResult<()> { + Ok(()) } } +#[async_trait::async_trait] impl TaskManager for InMemoryTaskManager { fn new(_opts: &TaskManagerOpts) -> Self { + static INIT: Once = Once::new(); + static mut SHARED_TASK_MANAGER: Option>> = None; + + INIT.call_once(|| { + let task_manager: Arc> = + Arc::new(Mutex::new(InMemoryTaskDb::new())); + unsafe { + SHARED_TASK_MANAGER = Some(Arc::clone(&task_manager)); + } + }); + InMemoryTaskManager { - db: InMemoryTaskDb::new(), + db: unsafe { SHARED_TASK_MANAGER.clone().unwrap() }, } } - fn enqueue_task( + async fn enqueue_task( &mut self, params: &EnqueueTaskParams, ) -> TaskManagerResult { - if let Ok(proving_status) = self.db.get_task_proving_status( + let mut db = self.db.lock().await; + if let Ok(proving_status) = db.get_task_proving_status( params.chain_id, params.blockhash, - params.proof_system, + params.proof_type, Some(params.prover.to_string()), ) { Ok(proving_status) } else { - self.db.enqueue_task(params); - let proving_status = self.db.get_task_proving_status( + db.enqueue_task(params); + let proving_status = db.get_task_proving_status( params.chain_id, params.blockhash, - params.proof_system, + params.proof_type, Some(params.prover.clone()), )?; Ok(proving_status) } } - fn update_task_progress( + async fn update_task_progress( &mut self, chain_id: ChainId, blockhash: B256, @@ -226,57 +242,60 @@ impl TaskManager for InMemoryTaskManager { status: TaskStatus, proof: Option<&[u8]>, ) -> TaskManagerResult<()> { - self.db - .update_task_progress(chain_id, blockhash, proof_system, prover, status, proof)?; + let mut db = self.db.lock().await; + db.update_task_progress(chain_id, blockhash, proof_system, prover, status, proof)?; Ok(()) } /// Returns the latest triplet (submitter or fulfiller, status, last update time) - fn get_task_proving_status( + async fn get_task_proving_status( &mut self, chain_id: ChainId, blockhash: B256, proof_system: ProofType, prover: Option, ) -> TaskManagerResult { - self.db - .get_task_proving_status(chain_id, blockhash, proof_system, prover) + let mut db = self.db.lock().await; + db.get_task_proving_status(chain_id, blockhash, proof_system, prover) } /// Returns the latest triplet (submitter or fulfiller, status, last update time) - fn get_task_proving_status_by_id( + async fn get_task_proving_status_by_id( &mut self, task_id: u64, ) -> TaskManagerResult { - let proving_status = self.db.get_task_proving_status_by_id(task_id)?; + let mut db = self.db.lock().await; + let proving_status = db.get_task_proving_status_by_id(task_id)?; Ok(proving_status) } - fn get_task_proof( + async fn get_task_proof( &mut self, chain_id: ChainId, blockhash: B256, proof_system: ProofType, prover: Option, ) 
-> TaskManagerResult> { - let proof = self - .db - .get_task_proof(chain_id, blockhash, proof_system, prover)?; + let mut db = self.db.lock().await; + let proof = db.get_task_proof(chain_id, blockhash, proof_system, prover)?; Ok(proof) } - fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { - let proof = self.db.get_task_proof_by_id(task_id)?; + async fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { + let mut db = self.db.lock().await; + let proof = db.get_task_proof_by_id(task_id)?; Ok(proof) } /// Returns the total and detailed database size - fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { - self.db.size() + async fn get_db_size(&mut self) -> TaskManagerResult<(usize, Vec<(String, usize)>)> { + let mut db = self.db.lock().await; + db.size() } - fn prune_db(&mut self) -> TaskManagerResult<()> { - todo!() + async fn prune_db(&mut self) -> TaskManagerResult<()> { + let mut db = self.db.lock().await; + db.prune() } } @@ -296,7 +315,7 @@ mod tests { let params = EnqueueTaskParams { chain_id: 1, blockhash: B256::default(), - proof_system: ProofType::Native, + proof_type: ProofType::Native, prover: "0x1234".to_owned(), ..Default::default() }; @@ -304,7 +323,7 @@ mod tests { let status = db.get_task_proving_status( params.chain_id, params.blockhash, - params.proof_system, + params.proof_type, Some(params.prover.clone()), ); assert!(status.is_ok()); diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs index 11d6d3f93..9b1578401 100644 --- a/task_manager/tests/main.rs +++ b/task_manager/tests/main.rs @@ -48,8 +48,8 @@ mod tests { ) } - #[test] - fn test_enqueue_task() { + #[tokio::test] + async fn test_enqueue_task() { // // Materialized local DB // let dir = std::env::current_dir().unwrap().join("tests"); // let file = dir.as_path().join("test_enqueue_task.sqlite"); @@ -62,27 +62,26 @@ mod tests { let dir = tempdir().unwrap(); let file = dir.path().join("test_enqueue_task.sqlite"); - let binding = get_task_manager(&TaskManagerOpts { + let mut tama = get_task_manager(&TaskManagerOpts { sqlite_file: file, max_db_size: 1_000_000, }); - #[allow(unused_mut)] - let mut tama = binding.lock().unwrap(); let (chain_id, block_hash, request) = create_random_task(&mut ChaCha8Rng::seed_from_u64(123)); tama.enqueue_task(&EnqueueTaskParams { chain_id, blockhash: block_hash, - proof_system: request.proof_type, + proof_type: request.proof_type, prover: request.prover.to_string(), block_number: request.block_number, }) + .await .unwrap(); } - #[test] - fn test_update_query_tasks_progress() { + #[tokio::test] + async fn test_update_query_tasks_progress() { // Materialized local DB let dir = std::env::current_dir().unwrap().join("tests"); let file = dir @@ -97,12 +96,10 @@ mod tests { // let dir = tempdir().unwrap(); // let file = dir.path().join("test_update_task_progress.sqlite"); - let binding = get_task_manager(&TaskManagerOpts { + let mut tama = get_task_manager(&TaskManagerOpts { sqlite_file: file, max_db_size: 1_000_000, }); - #[allow(unused_mut)] - let mut tama = binding.lock().unwrap(); let mut rng = ChaCha8Rng::seed_from_u64(123); let mut tasks = vec![]; @@ -113,10 +110,11 @@ mod tests { tama.enqueue_task(&EnqueueTaskParams { chain_id, blockhash: block_hash, - proof_system: request.proof_type, + proof_type: request.proof_type, prover: request.prover.to_string(), block_number: request.block_number, }) + .await .unwrap(); let task_status = tama @@ -126,6 +124,7 @@ mod tests { request.proof_type, 
Some(request.prover.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 1); let status = task_status @@ -152,6 +151,7 @@ mod tests { tasks[0].3, Some(tasks[0].4.to_string()), ) + .await .unwrap(); println!("{task_status:?}"); tama.update_task_progress( @@ -162,6 +162,7 @@ mod tests { TaskStatus::Cancelled_NeverStarted, None, ) + .await .unwrap(); let task_status = tama @@ -171,6 +172,7 @@ mod tests { tasks[0].3, Some(tasks[0].4.to_string()), ) + .await .unwrap(); println!("{task_status:?}"); assert_eq!(task_status.len(), 2); @@ -187,6 +189,7 @@ mod tests { TaskStatus::WorkInProgress, None, ) + .await .unwrap(); { @@ -197,6 +200,7 @@ mod tests { tasks[1].3, Some(tasks[1].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); @@ -213,6 +217,7 @@ mod tests { TaskStatus::CancellationInProgress, None, ) + .await .unwrap(); { @@ -223,6 +228,7 @@ mod tests { tasks[1].3, Some(tasks[1].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[2].0, TaskStatus::CancellationInProgress); @@ -240,6 +246,7 @@ mod tests { TaskStatus::Cancelled, None, ) + .await .unwrap(); { @@ -250,6 +257,7 @@ mod tests { tasks[1].3, Some(tasks[1].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 4); assert_eq!(task_status[3].0, TaskStatus::Cancelled); @@ -269,6 +277,7 @@ mod tests { TaskStatus::WorkInProgress, None, ) + .await .unwrap(); { @@ -279,6 +288,7 @@ mod tests { tasks[2].3, Some(tasks[2].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); @@ -296,6 +306,7 @@ mod tests { TaskStatus::Success, Some(&proof), ) + .await .unwrap(); { @@ -306,6 +317,7 @@ mod tests { tasks[2].3, Some(tasks[2].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[2].0, TaskStatus::Success); @@ -321,6 +333,7 @@ mod tests { tasks[2].3, Some(tasks[2].4.to_string()) ) + .await .unwrap() ); } @@ -335,6 +348,7 @@ mod tests { TaskStatus::WorkInProgress, None, ) + .await .unwrap(); { @@ -345,6 +359,7 @@ mod tests { tasks[3].3, Some(tasks[3].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 2); assert_eq!(task_status[1].0, TaskStatus::WorkInProgress); @@ -361,6 +376,7 @@ mod tests { TaskStatus::NetworkFailure, None, ) + .await .unwrap(); { @@ -371,6 +387,7 @@ mod tests { tasks[3].3, Some(tasks[3].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 3); assert_eq!(task_status[2].0, TaskStatus::NetworkFailure); @@ -388,6 +405,7 @@ mod tests { TaskStatus::WorkInProgress, None, ) + .await .unwrap(); { @@ -398,6 +416,7 @@ mod tests { tasks[3].3, Some(tasks[3].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 4); assert_eq!(task_status[3].0, TaskStatus::WorkInProgress); @@ -417,6 +436,7 @@ mod tests { TaskStatus::Success, Some(proof.as_slice()), ) + .await .unwrap(); { @@ -427,6 +447,7 @@ mod tests { tasks[3].3, Some(tasks[3].4.to_string()), ) + .await .unwrap(); assert_eq!(task_status.len(), 5); assert_eq!(task_status[4].0, TaskStatus::Success); @@ -444,6 +465,7 @@ mod tests { tasks[3].3, Some(tasks[3].4.to_string()) ) + .await .unwrap() ); } From bb57b50a6c52392ae88a963d132efc294ebd8ae4 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Tue, 2 Jul 2024 11:25:08 +0200 Subject: [PATCH 39/44] feat(task_manager): return empty list on key not found --- host/src/lib.rs | 23 +++++++++++++++++++---- host/src/server/api/v2/proof.rs | 9 ++------- 
task_manager/src/mem_db.rs | 2 +- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/host/src/lib.rs b/host/src/lib.rs index bc3672c76..acd8a9f1e 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -140,6 +140,24 @@ pub struct ProverState { pub task_channel: mpsc::Sender, } +impl From for TaskManagerOpts { + fn from(val: Cli) -> Self { + Self { + sqlite_file: val.sqlite_file, + max_db_size: val.max_db_size, + } + } +} + +impl From<&Cli> for TaskManagerOpts { + fn from(val: &Cli) -> Self { + Self { + sqlite_file: val.sqlite_file.clone(), + max_db_size: val.max_db_size, + } + } +} + impl ProverState { pub fn init() -> HostResult { // Read the command line arguments; @@ -174,10 +192,7 @@ impl ProverState { ) .await .unwrap(); - let task_manager_opts = &TaskManagerOpts { - sqlite_file: opts.sqlite_file.clone(), - max_db_size: opts.max_db_size, - }; + let task_manager_opts = &opts.into(); let proof_result: HostResult = async move { { let mut manager = get_task_manager(task_manager_opts); diff --git a/host/src/server/api/v2/proof.rs b/host/src/server/api/v2/proof.rs index 2fbe16a73..521280890 100644 --- a/host/src/server/api/v2/proof.rs +++ b/host/src/server/api/v2/proof.rs @@ -1,9 +1,7 @@ use axum::{debug_handler, extract::State, routing::post, Json, Router}; use raiko_core::interfaces::ProofRequest; use raiko_core::provider::get_task_data; -use raiko_task_manager::{ - get_task_manager, EnqueueTaskParams, TaskManager, TaskManagerOpts, TaskStatus, -}; +use raiko_task_manager::{get_task_manager, EnqueueTaskParams, TaskManager, TaskStatus}; use serde_json::Value; use tracing::info; use utoipa::OpenApi; @@ -58,10 +56,7 @@ async fn proof_handler( ) .await?; - let mut manager = get_task_manager(&TaskManagerOpts { - sqlite_file: prover_state.opts.sqlite_file.clone(), - max_db_size: prover_state.opts.max_db_size, - }); + let mut manager = get_task_manager(&(&prover_state.opts).into()); let status = manager .get_task_proving_status( chain_id, diff --git a/task_manager/src/mem_db.rs b/task_manager/src/mem_db.rs index 4b3287287..d8ddc591e 100644 --- a/task_manager/src/mem_db.rs +++ b/task_manager/src/mem_db.rs @@ -125,7 +125,7 @@ impl InMemoryTaskDb { match self.enqueue_task.get(&key) { Some(proving_status_records) => Ok(proving_status_records.clone()), - None => Err(TaskManagerError::SqlError("Key not found".to_owned())), + None => Ok(vec![]), } } From ec40dfc37ab982532e2ac16bb6db5843030e65c5 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Tue, 2 Jul 2024 13:07:01 +0200 Subject: [PATCH 40/44] feat(host,task_manager): add tracing and handle workers --- host/src/interfaces.rs | 23 ++++++- host/src/lib.rs | 112 ++++++++++++++------------------ host/src/server/api/v2/proof.rs | 23 +++---- task_manager/src/lib.rs | 13 ++++ task_manager/src/mem_db.rs | 55 +++++----------- 5 files changed, 111 insertions(+), 115 deletions(-) diff --git a/host/src/interfaces.rs b/host/src/interfaces.rs index df620349b..69d42cfa6 100644 --- a/host/src/interfaces.rs +++ b/host/src/interfaces.rs @@ -1,7 +1,7 @@ use axum::response::IntoResponse; use raiko_core::interfaces::ProofType; use raiko_lib::prover::ProverError; -use raiko_task_manager::TaskManagerError; +use raiko_task_manager::{TaskManagerError, TaskStatus}; use tokio::sync::mpsc::error::TrySendError; use utoipa::ToSchema; @@ -108,3 +108,24 @@ impl From> for HostError { /// A type alias for the standardized result type returned by the Raiko host. 
pub type HostResult = axum::response::Result; + +impl From for TaskStatus { + fn from(value: HostError) -> Self { + match value { + HostError::HandleDropped + | HostError::CapacityFull + | HostError::JoinHandle(_) + | HostError::InvalidAddress(_) + | HostError::InvalidRequestConfig(_) => unreachable!(), + HostError::Conversion(_) + | HostError::Serde(_) + | HostError::Core(_) + | HostError::Anyhow(_) + | HostError::FeatureNotSupportedError(_) + | HostError::Io(_) => TaskStatus::UnspecifiedFailureReason, + HostError::RPC(_) => TaskStatus::NetworkFailure, + HostError::Guest(_) => TaskStatus::ProofFailure_Generic, + HostError::TaskManager(_) => TaskStatus::SqlDbCorruption, + } + } +} diff --git a/host/src/lib.rs b/host/src/lib.rs index acd8a9f1e..80b59fe20 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -18,7 +18,7 @@ use raiko_task_manager::{get_task_manager, TaskManager, TaskManagerOpts, TaskSta use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::sync::mpsc; -use tracing::info; +use tracing::{error, info}; use crate::{ interfaces::{HostError, HostResult}, @@ -182,85 +182,67 @@ impl ProverState { let _spawn = tokio::spawn(async move { while let Some((proof_request, opts, chain_specs)) = receiver.recv().await { - let proof_request_clone = proof_request.clone(); - let opts_clone = opts.clone(); - let chain_specs_clone = chain_specs.clone(); - let (chain_id, blockhash) = get_task_data( + let Ok((chain_id, blockhash)) = get_task_data( &proof_request.network, proof_request.block_number, &chain_specs, ) .await - .unwrap(); - let task_manager_opts = &opts.into(); - let proof_result: HostResult = async move { - { - let mut manager = get_task_manager(task_manager_opts); - manager + else { + error!("Could not retrieve chain ID and blockhash"); + continue; + }; + let mut manager = get_task_manager(&opts.clone().into()); + if manager + .update_task_progress( + chain_id, + blockhash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + TaskStatus::WorkInProgress, + None, + ) + .await + .is_err() + { + error!("Could not update task to work in progress via task manager"); + } + match handle_proof(&proof_request, &opts, &chain_specs).await { + Ok(proof) => { + let proof = proof.proof.unwrap_or_default(); + let proof = proof.as_bytes(); + if manager .update_task_progress( chain_id, blockhash, proof_request.proof_type, Some(proof_request.prover.to_string()), - TaskStatus::WorkInProgress, - None, + TaskStatus::Success, + Some(proof), ) - .await?; - } - handle_proof(&proof_request_clone, &opts_clone, &chain_specs_clone).await - } - .await; - match proof_result { - Ok(proof) => { - let _: HostResult<()> = async move { - let proof = proof.proof.unwrap(); - let proof = proof.as_bytes(); - let mut manager = get_task_manager(task_manager_opts); - manager - .update_task_progress( - chain_id, - blockhash, - proof_request.proof_type, - Some(proof_request.prover.to_string()), - TaskStatus::WorkInProgress, - Some(proof), - ) - .await?; - Ok(()) + .await + .is_err() + { + error!("Could not update task progress to success via task manager"); } - .await; } Err(error) => { - let _: HostResult<()> = async move { - let mut manager = get_task_manager(task_manager_opts); - manager - .update_task_progress( - chain_id, - blockhash, - proof_request.proof_type, - Some(proof_request.prover.to_string()), - match error { - HostError::HandleDropped - | HostError::CapacityFull - | HostError::JoinHandle(_) - | HostError::InvalidAddress(_) - | HostError::InvalidRequestConfig(_) => unreachable!(), 
- HostError::Conversion(_) - | HostError::Serde(_) - | HostError::Core(_) - | HostError::Anyhow(_) - | HostError::FeatureNotSupportedError(_) - | HostError::Io(_) => TaskStatus::UnspecifiedFailureReason, - HostError::RPC(_) => TaskStatus::NetworkFailure, - HostError::Guest(_) => TaskStatus::ProofFailure_Generic, - HostError::TaskManager(_) => TaskStatus::SqlDbCorruption, - }, - None, - ) - .await?; - Ok(()) + if manager + .update_task_progress( + chain_id, + blockhash, + proof_request.proof_type, + Some(proof_request.prover.to_string()), + error.into(), + None, + ) + .await + .is_err() + { + error!( + "Could not update task progress to error state via task manager" + ); } - .await; } } } diff --git a/host/src/server/api/v2/proof.rs b/host/src/server/api/v2/proof.rs index 521280890..b7c422a2a 100644 --- a/host/src/server/api/v2/proof.rs +++ b/host/src/server/api/v2/proof.rs @@ -44,11 +44,6 @@ async fn proof_handler( inc_host_req_count(proof_request.block_number); inc_guest_req_count(&proof_request.proof_type, proof_request.block_number); - info!( - "# Generating proof for block {} on {}", - proof_request.block_number, proof_request.network - ); - let (chain_id, block_hash) = get_task_data( &proof_request.network, proof_request.block_number, @@ -67,11 +62,10 @@ async fn proof_handler( .await?; if status.is_empty() { - prover_state.task_channel.try_send(( - proof_request.clone(), - prover_state.opts, - prover_state.chain_specs, - ))?; + info!( + "# Generating proof for block {} on {}", + proof_request.block_number, proof_request.network + ); manager .enqueue_task(&EnqueueTaskParams { @@ -82,10 +76,17 @@ async fn proof_handler( block_number: proof_request.block_number, }) .await?; + + prover_state.task_channel.try_send(( + proof_request.clone(), + prover_state.opts, + prover_state.chain_specs, + ))?; + return Ok(Json(serde_json::json!("{}"))); } - let status = status.first().unwrap().0; + let status = status.last().unwrap().0; if matches!(status, TaskStatus::Success) { let proof = manager diff --git a/task_manager/src/lib.rs b/task_manager/src/lib.rs index 5f3cb8adb..42275c20f 100644 --- a/task_manager/src/lib.rs +++ b/task_manager/src/lib.rs @@ -120,6 +120,19 @@ impl From<&EnqueueTaskParams> for TaskDescriptor { } } +impl From<(ChainId, B256, ProofType, Option)> for TaskDescriptor { + fn from( + (chain_id, blockhash, proof_system, prover): (ChainId, B256, ProofType, Option), + ) -> Self { + TaskDescriptor { + chain_id, + blockhash, + proof_system, + prover: prover.unwrap_or_default(), + } + } +} + #[derive(Debug, Clone)] pub struct TaskProvingStatus(pub TaskStatus, pub Option, pub DateTime); diff --git a/task_manager/src/mem_db.rs b/task_manager/src/mem_db.rs index d8ddc591e..e413b7ca0 100644 --- a/task_manager/src/mem_db.rs +++ b/task_manager/src/mem_db.rs @@ -45,8 +45,7 @@ impl InMemoryTaskDb { } fn enqueue_task(&mut self, params: &EnqueueTaskParams) { - let task_desc_data: Vec = TaskDescriptor::from(params).into(); - let key: B256 = keccak(task_desc_data).into(); + let key: B256 = keccak(TaskDescriptor::from(params).to_vec()).into(); let task_status = TaskProvingStatus( TaskStatus::Registered, Some(params.prover.clone()), @@ -78,14 +77,10 @@ impl InMemoryTaskDb { status: TaskStatus, proof: Option<&[u8]>, ) -> TaskManagerResult<()> { - let td_data: Vec = TaskDescriptor { - chain_id, - blockhash, - proof_system, - prover: prover.clone().unwrap_or_default().to_owned(), - } + let key: B256 = keccak( + TaskDescriptor::from((chain_id, blockhash, proof_system, prover.clone())).to_vec(), + ) 
.into(); - let key = keccak(td_data).into(); ensure(self.enqueue_task.contains_key(&key), "no task found")?; let task_proving_records = self.enqueue_task.get(&key).unwrap(); @@ -113,13 +108,7 @@ impl InMemoryTaskDb { prover: Option, ) -> TaskManagerResult { let key: B256 = keccak( - TaskDescriptor { - chain_id, - blockhash, - proof_system, - prover: prover.unwrap_or_default().to_owned(), - } - .to_vec(), + TaskDescriptor::from((chain_id, blockhash, proof_system, prover.clone())).to_vec(), ) .into(); @@ -147,13 +136,7 @@ impl InMemoryTaskDb { prover: Option, ) -> TaskManagerResult> { let key: B256 = keccak( - TaskDescriptor { - chain_id, - blockhash, - proof_system, - prover: prover.unwrap_or_default().to_owned(), - } - .to_vec(), + TaskDescriptor::from((chain_id, blockhash, proof_system, prover.clone())).to_vec(), ) .into(); ensure(self.enqueue_task.contains_key(&key), "no task found")?; @@ -214,22 +197,22 @@ impl TaskManager for InMemoryTaskManager { params: &EnqueueTaskParams, ) -> TaskManagerResult { let mut db = self.db.lock().await; - if let Ok(proving_status) = db.get_task_proving_status( + let status = db.get_task_proving_status( params.chain_id, params.blockhash, params.proof_type, Some(params.prover.to_string()), - ) { - Ok(proving_status) - } else { + )?; + if status.is_empty() { db.enqueue_task(params); - let proving_status = db.get_task_proving_status( + db.get_task_proving_status( params.chain_id, params.blockhash, params.proof_type, Some(params.prover.clone()), - )?; - Ok(proving_status) + ) + } else { + Ok(status) } } @@ -243,8 +226,7 @@ impl TaskManager for InMemoryTaskManager { proof: Option<&[u8]>, ) -> TaskManagerResult<()> { let mut db = self.db.lock().await; - db.update_task_progress(chain_id, blockhash, proof_system, prover, status, proof)?; - Ok(()) + db.update_task_progress(chain_id, blockhash, proof_system, prover, status, proof) } /// Returns the latest triplet (submitter or fulfiller, status, last update time) @@ -265,8 +247,7 @@ impl TaskManager for InMemoryTaskManager { task_id: u64, ) -> TaskManagerResult { let mut db = self.db.lock().await; - let proving_status = db.get_task_proving_status_by_id(task_id)?; - Ok(proving_status) + db.get_task_proving_status_by_id(task_id) } async fn get_task_proof( @@ -277,14 +258,12 @@ impl TaskManager for InMemoryTaskManager { prover: Option, ) -> TaskManagerResult> { let mut db = self.db.lock().await; - let proof = db.get_task_proof(chain_id, blockhash, proof_system, prover)?; - Ok(proof) + db.get_task_proof(chain_id, blockhash, proof_system, prover) } async fn get_task_proof_by_id(&mut self, task_id: u64) -> TaskManagerResult> { let mut db = self.db.lock().await; - let proof = db.get_task_proof_by_id(task_id)?; - Ok(proof) + db.get_task_proof_by_id(task_id) } /// Returns the total and detailed database size From ff7edf63f3d28c4759b56fa4a60ddd5e64e5dcb4 Mon Sep 17 00:00:00 2001 From: Petar Vujovic Date: Tue, 2 Jul 2024 16:19:24 +0200 Subject: [PATCH 41/44] feat(host): fix response structure --- host/src/server/api/v1/mod.rs | 15 ++++++++++----- host/src/server/api/v2/proof.rs | 11 +++++++++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/host/src/server/api/v1/mod.rs b/host/src/server/api/v1/mod.rs index 9141b508c..d84113816 100644 --- a/host/src/server/api/v1/mod.rs +++ b/host/src/server/api/v1/mod.rs @@ -60,13 +60,18 @@ pub struct ProofResponse { pub quote: Option, } -impl IntoResponse for ProofResponse { - fn into_response(self) -> axum::response::Response { - axum::Json(serde_json::json!({ +impl 
ProofResponse {
+    pub fn to_response(&self) -> Value {
+        serde_json::json!({
             "status": "ok",
             "data": self
-        }))
-        .into_response()
+        })
+    }
+}
+
+impl IntoResponse for ProofResponse {
+    fn into_response(self) -> axum::response::Response {
+        axum::Json(self.to_response()).into_response()
     }
 }

diff --git a/host/src/server/api/v2/proof.rs b/host/src/server/api/v2/proof.rs
index b7c422a2a..6460ffddc 100644
--- a/host/src/server/api/v2/proof.rs
+++ b/host/src/server/api/v2/proof.rs
@@ -83,7 +83,14 @@ async fn proof_handler(
             prover_state.chain_specs,
         ))?;

-        return Ok(Json(serde_json::json!("{}")));
+        return Ok(Json(serde_json::json!(
+            {
+                "status": "ok",
+                "data": {
+                    "status": TaskStatus::Registered,
+                }
+            }
+        )));
     }

     let status = status.last().unwrap().0;
@@ -104,7 +111,7 @@ async fn proof_handler(
             quote: None,
         };

-        return Ok(Json(serde_json::to_value(response)?));
+        return Ok(Json(response.to_response()));
     }

     Ok(Json(serde_json::json!(

From 5d31091b67f7a2c5733e1b63d0a16462fd6fca56 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Tue, 2 Jul 2024 18:44:30 +0200
Subject: [PATCH 42/44] chore(clippy): remove unused imports

---
 core/src/preflight.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/preflight.rs b/core/src/preflight.rs
index b5b6b57d7..b426de656 100644
--- a/core/src/preflight.rs
+++ b/core/src/preflight.rs
@@ -1,7 +1,7 @@
 use crate::{
     interfaces::{RaikoError, RaikoResult},
     provider::{db::ProviderDb, rpc::RpcBlockDataProvider, BlockDataProvider},
-    require, require_eq,
+    require,
 };
 pub use alloy_primitives::*;
 use alloy_provider::{Provider, ReqwestProvider};

From 867cc55752d387e4c1729c5994cb0390cdf00eb0 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Tue, 2 Jul 2024 19:02:23 +0200
Subject: [PATCH 43/44] fix(ci): remove git merge added lines

---
 .github/workflows/ci-risc0.yml | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/ci-risc0.yml b/.github/workflows/ci-risc0.yml
index b50c9f917..5ac10b864 100644
--- a/.github/workflows/ci-risc0.yml
+++ b/.github/workflows/ci-risc0.yml
@@ -11,12 +11,10 @@ on:
       - "provers/risc0/**"
   merge_group:

-
-==== BASE ====
 jobs:
-    build-test-risc0:
-      name: Build and test risc0
-      uses: ./.github/workflows/ci-build-test-reusable.yml
-      with:
-        version_name: "risc0"
-        version_toolchain: "stable"
+  build-test-risc0:
+    name: Build and test risc0
+    uses: ./.github/workflows/ci-build-test-reusable.yml
+    with:
+      version_name: "risc0"
+      version_toolchain: "stable"

From 52b767db5c2a6c7df6e20da0609f350bdae7a870 Mon Sep 17 00:00:00 2001
From: Petar Vujovic
Date: Tue, 2 Jul 2024 19:28:26 +0200
Subject: [PATCH 44/44] fix(task_manager): add blob proof type field

---
 task_manager/tests/main.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/task_manager/tests/main.rs b/task_manager/tests/main.rs
index 9b1578401..e5b85addd 100644
--- a/task_manager/tests/main.rs
+++ b/task_manager/tests/main.rs
@@ -14,7 +14,7 @@ mod tests {
     use rand::{Rng, SeedableRng};
     use rand_chacha::ChaCha8Rng;

-    use raiko_lib::primitives::B256;
+    use raiko_lib::{input::BlobProofType, primitives::B256};
     use raiko_task_manager::{
         get_task_manager, EnqueueTaskParams, TaskManager, TaskManagerOpts, TaskStatus,
     };
@@ -44,6 +44,7 @@ mod tests {
             prover,
             proof_type,
             prover_args,
+            blob_proof_type: BlobProofType::ProofOfEquivalence,
         },
     )
 }
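For reference, the surface that this series converges on can be exercised end to end. Below is a minimal, hedged usage sketch of a caller driving the `TaskManager` trait; it is a sketch only, not code from the patches. The `#[tokio::main]` runtime, the database path, the chain id `167_000`, and the prover address `"0x1234"` are illustrative assumptions.

// Usage sketch only: exercises the TaskManager API assembled by the patches
// above. Runtime attribute and all concrete values are illustrative
// assumptions, not taken from the series.
use raiko_core::interfaces::ProofType;
use raiko_lib::primitives::B256;
use raiko_task_manager::{
    get_task_manager, EnqueueTaskParams, TaskManager, TaskManagerOpts, TaskManagerResult,
    TaskStatus,
};

#[tokio::main]
async fn main() -> TaskManagerResult<()> {
    // The wrapper selects the SQLite backend when the crate is built with the
    // `sqlite` feature, and the shared in-memory backend otherwise.
    let mut manager = get_task_manager(&TaskManagerOpts {
        sqlite_file: "raiko-tasks.sqlite".into(), // hypothetical path
        max_db_size: 1_000_000,
    });

    // Enqueueing is idempotent on (chain_id, blockhash, proof_type, prover):
    // re-submitting the same tuple returns the existing status history.
    let history = manager
        .enqueue_task(&EnqueueTaskParams {
            chain_id: 167_000, // hypothetical chain id
            blockhash: B256::default(),
            proof_type: ProofType::Native,
            prover: "0x1234".to_string(), // hypothetical prover address
            block_number: 1,
        })
        .await?;
    assert_eq!(history.last().unwrap().0, TaskStatus::Registered);

    // A worker normally drives the task forward; a caller polls the status
    // records and fetches the proof once the latest record reports Success.
    let records = manager
        .get_task_proving_status(
            167_000,
            B256::default(),
            ProofType::Native,
            Some("0x1234".to_string()),
        )
        .await?;
    if matches!(records.last(), Some(record) if record.0 == TaskStatus::Success) {
        let proof = manager
            .get_task_proof(
                167_000,
                B256::default(),
                ProofType::Native,
                Some("0x1234".to_string()),
            )
            .await?;
        println!("proof: {} bytes", proof.len());
    }

    Ok(())
}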