From 5294cfd1907aa5b4790ab2156bab76286db9e59b Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 30 Sep 2025 11:43:06 +0100
Subject: [PATCH 01/81] feat: Cap bitcoin-da fee rate
---
 bin/citrea/src/rollup/bitcoin.rs  |  8 ++++++--
 bin/citrea/tests/bitcoin/utils.rs |  8 +++++++-
 crates/bitcoin-da/src/fee.rs      | 15 +++++++++++++--
 crates/bitcoin-da/src/service.rs  |  6 ++++++
 4 files changed, 32 insertions(+), 5 deletions(-)
diff --git a/bin/citrea/src/rollup/bitcoin.rs b/bin/citrea/src/rollup/bitcoin.rs
index 2675264d81..3af826ee68 100644
--- a/bin/citrea/src/rollup/bitcoin.rs
+++ b/bin/citrea/src/rollup/bitcoin.rs
@@ -141,8 +141,12 @@ impl RollupBlueprint for BitcoinRollup {
         );
         let monitoring_service = Arc::new(monitoring_service);
-        let fee_service =
-            FeeService::new(client.clone(), network, da_config.mempool_space_url.clone());
+        let fee_service = FeeService::new(
+            client.clone(),
+            network,
+            da_config.mempool_space_url.clone(),
+            da_config.max_fee_rate_sat_vb,
+        );
         let service = Arc::new(
             BitcoinService::from_config(
diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs
index ee2afbf8a9..ed23ee6c75 100644
--- a/bin/citrea/tests/bitcoin/utils.rs
+++ b/bin/citrea/tests/bitcoin/utils.rs
@@ -188,6 +188,7 @@ pub async fn spawn_bitcoin_da_service(
         utxo_selection_mode,
         rpc_timeout_secs: None,
         rpc_connect_timeout_secs: None,
+        max_fee_rate_sat_vb: None,
     };
     let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
@@ -225,7 +226,12 @@ pub async fn spawn_bitcoin_da_service(
     );
     let monitoring_service = Arc::new(monitoring_service);
-    let fee_service = FeeService::new(client.clone(), network, da_config.mempool_space_url.clone());
+    let fee_service = FeeService::new(
+        client.clone(),
+        network,
+        da_config.mempool_space_url.clone(),
+        da_config.max_fee_rate_sat_vb,
+    );
     let service = Arc::new(
         BitcoinService::from_config(
diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs
index a384e401b1..bada08aef2 100644
--- a/crates/bitcoin-da/src/fee.rs
+++ b/crates/bitcoin-da/src/fee.rs
@@ -26,6 +26,8 @@ const BASE_FEE_RATE_MULTIPLIER: f64 = 1.0;
 const FEE_RATE_MULTIPLIER_FACTOR: f64 = 1.1;
 const MAX_FEE_RATE_MULTIPLIER: f64 = 2.0;
+const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15;
+
 /// Type alias for a Partially Signed Bitcoin Transaction (PSBT).
 pub type Psbt = String;
@@ -46,6 +48,7 @@ pub struct FeeService {
     client: Arc<Client>,
     network: Network,
     mempool_space_url: String,
+    max_fee_rate_sat_vb: u64,
 }
 impl FeeService {
@@ -54,13 +57,17 @@ impl FeeService {
         client: Arc<Client>,
         network: bitcoin::Network,
         mempool_space_url: Option<String>,
+        max_fee_rate_sat_vb: Option<u64>,
     ) -> Self {
         let mempool_space_url =
             mempool_space_url.unwrap_or_else(|| DEFAULT_MEMPOOL_SPACE_URL.to_string());
+
+        let max_fee_rate_sat_vb = max_fee_rate_sat_vb.unwrap_or(DEFAULT_MAX_FEE_RATE_SAT_VB);
         Self {
             client,
             network,
             mempool_space_url,
+            max_fee_rate_sat_vb,
         }
     }
@@ -97,10 +104,14 @@ impl FeeService {
                 .fee_rate
             }
         };
+
         let sat_vkb = smart_fee.map_or(1000, |rate| rate.to_sat());
+        let sat_vb = sat_vkb / 1000;
+        let capped_fee_rate = sat_vb.min(self.max_fee_rate_sat_vb);
+
+        tracing::debug!("Fee rate: {capped_fee_rate} sat/vb");
-        tracing::debug!("Fee rate: {} sat/vb", sat_vkb / 1000);
-        Ok(sat_vkb / 1000)
+        Ok(capped_fee_rate)
     }
     /// Bump TX fee via cpfp.
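A minimal, self-contained sketch of the capping arithmetic this patch adds to FeeService::get_fee_rate: convert the estimatesmartfee-style sat/kvB value to sat/vB, then clamp it. The constant and the map_or(1000, ...) fallback mirror the diff above; the helper name and the main driver are illustrative only, not part of the patch.

const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15;

/// Convert a fee estimate in sat/kvB to sat/vB and cap it at `max_sat_vb`.
/// Falls back to 1000 sat/kvB (1 sat/vB) when no estimate is available,
/// matching the `map_or(1000, ...)` in the diff.
fn capped_fee_rate(smart_fee_sat_kvb: Option<u64>, max_sat_vb: u64) -> u64 {
    let sat_vkb = smart_fee_sat_kvb.unwrap_or(1000);
    (sat_vkb / 1000).min(max_sat_vb)
}

fn main() {
    assert_eq!(capped_fee_rate(None, DEFAULT_MAX_FEE_RATE_SAT_VB), 1);
    assert_eq!(capped_fee_rate(Some(8_000), DEFAULT_MAX_FEE_RATE_SAT_VB), 8);
    // A 40 sat/vB estimate is clamped to the 15 sat/vB default cap.
    assert_eq!(capped_fee_rate(Some(40_000), DEFAULT_MAX_FEE_RATE_SAT_VB), 15);
}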
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index 1948702d2e..9a294e67cb 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -124,6 +124,9 @@ pub struct BitcoinServiceConfig {
     /// Connection timeout for RPC in seconds
     pub rpc_connect_timeout_secs: Option<u64>,
+
+    /// Max fee rate in sat/vb
+    pub max_fee_rate_sat_vb: Option<u64>,
 }
 impl citrea_common::FromEnv for BitcoinServiceConfig {
@@ -149,6 +152,9 @@ impl citrea_common::FromEnv for BitcoinServiceConfig {
             rpc_connect_timeout_secs: read_env("BITCOIN_RPC_CONNECT_TIMEOUT_SECS")
                 .ok()
                 .and_then(|v| v.parse::<u64>().ok()),
+            max_fee_rate_sat_vb: read_env("BITCOIN_MAX_FEE_RATE_SAT_VB")
+                .ok()
+                .and_then(|v| v.parse::<u64>().ok()),
         })
     }
 }

From 568e8522ffc9a11bea8bc0cfe58046e9446f8218 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 30 Sep 2025 14:20:31 +0100
Subject: [PATCH 02/81] Revert capping at the FeeService level
---
 bin/citrea/src/rollup/bitcoin.rs  |  8 ++------
 bin/citrea/tests/bitcoin/utils.rs |  7 +------
 crates/bitcoin-da/src/fee.rs      | 11 ++---------
 3 files changed, 5 insertions(+), 21 deletions(-)
diff --git a/bin/citrea/src/rollup/bitcoin.rs b/bin/citrea/src/rollup/bitcoin.rs
index 3af826ee68..2675264d81 100644
--- a/bin/citrea/src/rollup/bitcoin.rs
+++ b/bin/citrea/src/rollup/bitcoin.rs
@@ -141,12 +141,8 @@ impl RollupBlueprint for BitcoinRollup {
         );
         let monitoring_service = Arc::new(monitoring_service);
-        let fee_service = FeeService::new(
-            client.clone(),
-            network,
-            da_config.mempool_space_url.clone(),
-            da_config.max_fee_rate_sat_vb,
-        );
+        let fee_service =
+            FeeService::new(client.clone(), network, da_config.mempool_space_url.clone());
         let service = Arc::new(
             BitcoinService::from_config(
diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs
index ed23ee6c75..be6b014467 100644
--- a/bin/citrea/tests/bitcoin/utils.rs
+++ b/bin/citrea/tests/bitcoin/utils.rs
@@ -226,12 +226,7 @@ pub async fn spawn_bitcoin_da_service(
     );
     let monitoring_service = Arc::new(monitoring_service);
-    let fee_service = FeeService::new(
-        client.clone(),
-        network,
-        da_config.mempool_space_url.clone(),
-        da_config.max_fee_rate_sat_vb,
-    );
+    let fee_service = FeeService::new(client.clone(), network, da_config.mempool_space_url.clone());
     let service = Arc::new(
         BitcoinService::from_config(
diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs
index bada08aef2..e6ec89afdf 100644
--- a/crates/bitcoin-da/src/fee.rs
+++ b/crates/bitcoin-da/src/fee.rs
@@ -26,8 +26,6 @@ const BASE_FEE_RATE_MULTIPLIER: f64 = 1.0;
 const FEE_RATE_MULTIPLIER_FACTOR: f64 = 1.1;
 const MAX_FEE_RATE_MULTIPLIER: f64 = 2.0;
-const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15;
-
 /// Type alias for a Partially Signed Bitcoin Transaction (PSBT).
 pub type Psbt = String;
@@ -48,7 +46,6 @@ pub struct FeeService {
     client: Arc<Client>,
     network: Network,
     mempool_space_url: String,
-    max_fee_rate_sat_vb: u64,
 }
 impl FeeService {
@@ -57,17 +54,14 @@ impl FeeService {
         client: Arc<Client>,
         network: bitcoin::Network,
         mempool_space_url: Option<String>,
-        max_fee_rate_sat_vb: Option<u64>,
     ) -> Self {
         let mempool_space_url =
             mempool_space_url.unwrap_or_else(|| DEFAULT_MEMPOOL_SPACE_URL.to_string());
-        let max_fee_rate_sat_vb = max_fee_rate_sat_vb.unwrap_or(DEFAULT_MAX_FEE_RATE_SAT_VB);
         Self {
             client,
             network,
             mempool_space_url,
-            max_fee_rate_sat_vb,
         }
     }
@@ -107,11 +101,10 @@ impl FeeService {
         let sat_vkb = smart_fee.map_or(1000, |rate| rate.to_sat());
         let sat_vb = sat_vkb / 1000;
-        let capped_fee_rate = sat_vb.min(self.max_fee_rate_sat_vb);
-        tracing::debug!("Fee rate: {capped_fee_rate} sat/vb");
+        tracing::debug!("Fee rate: {sat_vb} sat/vb");
-        Ok(capped_fee_rate)
+        Ok(sat_vb)
     }
     /// Bump TX fee via cpfp.

From b7342e02e15342fc326fdd7feedf5a1b6e43b8c6 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 30 Sep 2025 14:29:35 +0100
Subject: [PATCH 03/81] Handle capping at the run_da_queue level and leverage
 existing retry mechanism
---
 crates/bitcoin-da/src/service.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index 9a294e67cb..2306f2e09f 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -294,7 +294,7 @@ impl BitcoinService {
         loop {
             // Build and queue tx with retries:
-            let fee_sat_per_vbyte = match self.fee.get_fee_rate().await {
+            let mut fee_sat_per_vbyte = match self.fee.get_fee_rate().await {
                 Ok(rate) => (rate as f64 * fee_rate_multiplier).ceil() as u64,
                 Err(e) => {
                     error!(?e, "Failed to call get_fee_rate. Retrying...");
@@ -302,6 +302,9 @@
                     continue;
                 }
             };
+
+            fee_sat_per_vbyte = fee_sat_per_vbyte.min(self.config.max_fee_rate_sat_vb);
+
             match self
                 .send_transaction_with_fee_rate(
                     request.tx_request.clone(),

From 6da2916dbc5fd4b3ab6ac243277a0272465b0ce1 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 30 Sep 2025 14:33:27 +0100
Subject: [PATCH 04/81] Set default value of 15sat/vb
---
 crates/bitcoin-da/src/fee.rs     |  2 ++
 crates/bitcoin-da/src/service.rs | 11 +++++++++--
 2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs
index e6ec89afdf..6829f892dd 100644
--- a/crates/bitcoin-da/src/fee.rs
+++ b/crates/bitcoin-da/src/fee.rs
@@ -26,6 +26,8 @@ const BASE_FEE_RATE_MULTIPLIER: f64 = 1.0;
 const FEE_RATE_MULTIPLIER_FACTOR: f64 = 1.1;
 const MAX_FEE_RATE_MULTIPLIER: f64 = 2.0;
+pub(crate) const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15;
+
 /// Type alias for a Partially Signed Bitcoin Transaction (PSBT).
 pub type Psbt = String;
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index 2306f2e09f..58a6767fcd 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -40,7 +40,7 @@ use tokio::sync::Mutex;
 use tracing::{debug, error, info, instrument, trace, warn};
 use crate::error::{BitcoinServiceError, MempoolRejection};
-use crate::fee::{validate_txs_fee_rate, BumpFeeMethod, FeeService};
+use crate::fee::{validate_txs_fee_rate, BumpFeeMethod, FeeService, DEFAULT_MAX_FEE_RATE_SAT_VB};
 use crate::helpers::backup::backup_txs_to_file;
 use crate::helpers::builders::body_builders::{create_inscription_transactions, DaTxs, RawTxData};
 use crate::helpers::builders::TxWithId;
@@ -176,6 +176,7 @@ pub struct BitcoinService {
     tx_queue: Arc<Mutex<VecDeque<TxRequestWithNotifier<TxidWrapper>>>>,
     pub(crate) tx_signer: TxSigner,
     utxo_selection_mode: UtxoSelectionMode,
+    max_fee_rate_sat_vb: u64,
 }
 impl BitcoinService {
@@ -191,6 +192,7 @@ impl BitcoinService {
         reveal_tx_prefix: Vec<u8>,
         tx_backup_dir: PathBuf,
         utxo_selection_mode: UtxoSelectionMode,
+        max_fee_rate_sat_vb: u64,
     ) -> Self {
         Self {
             tx_signer: TxSigner::new(client.clone()),
@@ -208,6 +210,7 @@
             ))),
             tx_queue: Arc::new(Mutex::new(VecDeque::new())),
             utxo_selection_mode,
+            max_fee_rate_sat_vb,
         }
     }
@@ -248,6 +251,9 @@
             .context("Invalid private key")?;
         let utxo_selection_mode = config.utxo_selection_mode.clone().unwrap_or_default();
+        let max_fee_rate_sat_vb = config
+            .max_fee_rate_sat_vb
+            .unwrap_or(DEFAULT_MAX_FEE_RATE_SAT_VB);
         Ok(Self::new(
             client,
             network,
@@ -259,6 +265,7 @@
             chain_params.reveal_tx_prefix,
             tx_backup_dir.to_path_buf(),
             utxo_selection_mode,
+            max_fee_rate_sat_vb,
         ))
     }
@@ -303,7 +310,7 @@
             }
         };
-        fee_sat_per_vbyte = fee_sat_per_vbyte.min(self.config.max_fee_rate_sat_vb);
+        fee_sat_per_vbyte = fee_sat_per_vbyte.min(self.max_fee_rate_sat_vb);
         match self
             .send_transaction_with_fee_rate(

From 48cd30e36567f609025dd16169c062969468ebaa Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 30 Sep 2025 14:37:04 +0100
Subject: [PATCH 05/81] Do not try sending tx with capped fee rate
---
 crates/bitcoin-da/src/service.rs | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index 58a6767fcd..fad4bf798e 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -301,7 +301,7 @@ impl BitcoinService {
         loop {
             // Build and queue tx with retries:
-            let mut fee_sat_per_vbyte = match self.fee.get_fee_rate().await {
+            let fee_sat_per_vbyte = match self.fee.get_fee_rate().await {
                 Ok(rate) => (rate as f64 * fee_rate_multiplier).ceil() as u64,
                 Err(e) => {
                     error!(?e, "Failed to call get_fee_rate. Retrying...");
@@ -310,7 +310,12 @@
                     continue;
                 }
             };
-            fee_sat_per_vbyte = fee_sat_per_vbyte.min(self.max_fee_rate_sat_vb);
+            if fee_sat_per_vbyte > self.max_fee_rate_sat_vb {
+                warn!(?e, "Fee rate {} above cap of {}. 
Waiting before sending transaction", fee_sat_per_vbyte, self.max_fee_rate_sat_vb);
+                tokio::time::sleep(Duration::from_secs(10)).await;
+                continue;
+            }
+
             match self
                 .send_transaction_with_fee_rate(

From 65caf04c450a12da5bac123ea6d3371cb8f4b828 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 30 Sep 2025 14:38:34 +0100
Subject: [PATCH 06/81] Lint
---
 crates/bitcoin-da/src/service.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index fad4bf798e..2c64eb2c7e 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -311,7 +311,7 @@ impl BitcoinService {
             };
             if fee_sat_per_vbyte > self.max_fee_rate_sat_vb {
-                warn!(?e, "Fee rate {} above cap of {}. Waiting before sending transaction", fee_sat_per_vbyte, self.max_fee_rate_sat_vb);
+                warn!("Fee rate {} above cap of {}. Waiting before sending transaction", fee_sat_per_vbyte, self.max_fee_rate_sat_vb);
                 tokio::time::sleep(Duration::from_secs(10)).await;
                 continue;
             }

From 64b789f7d9c35fd0836f81e893e56821cb87d89b Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 1 Oct 2025 13:48:12 +0100
Subject: [PATCH 07/81] Temporary cap
---
 bin/citrea/tests/bitcoin/utils.rs |  1 +
 crates/bitcoin-da/src/fee.rs      |  2 --
 crates/bitcoin-da/src/service.rs  | 35 ++++++++++++++++++++++++++++---
 3 files changed, 33 insertions(+), 5 deletions(-)
diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs
index be6b014467..e1fa05b6db 100644
--- a/bin/citrea/tests/bitcoin/utils.rs
+++ b/bin/citrea/tests/bitcoin/utils.rs
@@ -189,6 +189,7 @@ pub async fn spawn_bitcoin_da_service(
     rpc_timeout_secs: None,
     rpc_connect_timeout_secs: None,
     max_fee_rate_sat_vb: None,
+    fee_rate_cap_duration_secs: None,
 };
diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs
index 36ee781828..db57c98172 100644
--- a/crates/bitcoin-da/src/fee.rs
+++ b/crates/bitcoin-da/src/fee.rs
@@ -26,8 +26,6 @@ const BASE_FEE_RATE_MULTIPLIER: f64 = 1.0;
 const FEE_RATE_MULTIPLIER_FACTOR: f64 = 1.1;
 const MAX_FEE_RATE_MULTIPLIER: f64 = 2.0;
-pub(crate) const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15;
-
 /// Type alias for a Partially Signed Bitcoin Transaction (PSBT).
 pub type Psbt = String;
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index d4c1779d89..919187a4f7 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -40,7 +40,7 @@ use tokio::sync::Mutex;
 use tracing::{debug, error, info, instrument, trace, warn};
 use crate::error::{BitcoinServiceError, MempoolRejection};
-use crate::fee::{validate_txs_fee_rate, BumpFeeMethod, FeeService, DEFAULT_MAX_FEE_RATE_SAT_VB};
+use crate::fee::{validate_txs_fee_rate, BumpFeeMethod, FeeService};
 use crate::helpers::backup::backup_txs_to_file;
 use crate::helpers::builders::body_builders::{create_inscription_transactions, DaTxs, RawTxData};
 use crate::helpers::builders::TxWithId;
@@ -68,6 +68,9 @@ use crate::REVEAL_OUTPUT_AMOUNT;
 pub(crate) type Result<T> = std::result::Result<T, BitcoinServiceError>;
 const POLLING_INTERVAL: u64 = 10; // seconds
+const DEFAULT_FEE_RATE_CAP_DURATION_SECS: u64 = 3600; // 1hour default cap duration
+const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15;
+
 /// Map sov Network to Bitcoin Network.
pub fn network_to_bitcoin_network(network: &Network) -> bitcoin::Network {
     match network {
@@ -124,6 +127,9 @@ pub struct BitcoinServiceConfig {
     /// Max fee rate in sat/vb
     pub max_fee_rate_sat_vb: Option<u64>,
+
+    /// Fee rate cap duration in seconds
+    pub fee_rate_cap_duration_secs: Option<u64>,
 }
 impl citrea_common::FromEnv for BitcoinServiceConfig {
@@ -155,6 +158,9 @@ impl citrea_common::FromEnv for BitcoinServiceConfig {
         max_fee_rate_sat_vb: read_env("BITCOIN_MAX_FEE_RATE_SAT_VB")
             .ok()
             .and_then(|v| v.parse::<u64>().ok()),
+        fee_rate_cap_duration_secs: read_env("BITCOIN_FEE_RATE_CAP_DURATION_SECS")
+            .ok()
+            .and_then(|v| v.parse::<u64>().ok()),
     })
 }
 }
@@ -177,6 +186,7 @@ pub struct BitcoinService {
     pub(crate) tx_signer: TxSigner,
     utxo_selection_mode: UtxoSelectionMode,
     max_fee_rate_sat_vb: u64,
+    fee_rate_cap_duration_secs: u64,
 }
 impl BitcoinService {
@@ -193,6 +203,7 @@ impl BitcoinService {
     tx_backup_dir: PathBuf,
     utxo_selection_mode: UtxoSelectionMode,
     max_fee_rate_sat_vb: u64,
+    fee_rate_cap_duration_secs: u64,
 ) -> Self {
     Self {
         tx_signer: TxSigner::new(client.clone()),
@@ -211,6 +222,7 @@ impl BitcoinService {
         tx_queue: Arc::new(Mutex::new(VecDeque::new())),
         utxo_selection_mode,
         max_fee_rate_sat_vb,
+        fee_rate_cap_duration_secs,
     }
 }
@@ -254,6 +266,9 @@ impl BitcoinService {
     let max_fee_rate_sat_vb = config
         .max_fee_rate_sat_vb
         .unwrap_or(DEFAULT_MAX_FEE_RATE_SAT_VB);
+    let fee_rate_cap_duration_secs = config
+        .fee_rate_cap_duration_secs
+        .unwrap_or(DEFAULT_FEE_RATE_CAP_DURATION_SECS);
     Ok(Self::new(
         client,
         network,
@@ -266,6 +281,7 @@ impl BitcoinService {
     tx_backup_dir.to_path_buf(),
     utxo_selection_mode,
     max_fee_rate_sat_vb,
+    fee_rate_cap_duration_secs,
     ))
 }
@@ -299,6 +315,7 @@ impl BitcoinService {
     if let Some(request) = request_opt {
         trace!("A new request is received");
+        let start = std::time::Instant::now();
         loop {
             // Build and queue tx with retries:
             let fee_sat_per_vbyte = match self.fee.get_fee_rate().await {
@@ -310,12 +327,24 @@ impl BitcoinService {
                 }
             };
-            if fee_sat_per_vbyte > self.max_fee_rate_sat_vb {
-                warn!("Fee rate {} above cap of {}. Waiting before sending transaction", fee_sat_per_vbyte, self.max_fee_rate_sat_vb);
+            // Cap fee at self.max_fee_rate_sat_vb for a maximum of `self.fee_rate_cap_duration_secs`.
+            // If `self.fee_rate_cap_duration_secs` is exceeded, send transaction with fee rate above `self.max_fee_rate_sat_vb` anyway
+            let elapsed = start.elapsed().as_secs();
+
+            if fee_sat_per_vbyte > self.max_fee_rate_sat_vb
+                && elapsed < self.fee_rate_cap_duration_secs {
+                warn!("Fee rate {} sat/vb above cap of {}. Waiting (elapsed: {}s / max: {}s)", fee_sat_per_vbyte, self.max_fee_rate_sat_vb, elapsed, self.fee_rate_cap_duration_secs);
                 tokio::time::sleep(Duration::from_secs(10)).await;
                 continue;
             }
+            if fee_sat_per_vbyte > self.max_fee_rate_sat_vb
+                && elapsed >= self.fee_rate_cap_duration_secs {
+                warn!(
+                    "Fee rate {} sat/vb above cap of {} sat/vb, but cap duration of {}s exceeded. 
Sending transaction anyway", + fee_sat_per_vbyte, self.max_fee_rate_sat_vb, self.fee_rate_cap_duration_secs + ); + } match self .send_transaction_with_fee_rate( From 7e4e781c5600ff0e84a8aef25ed91722ddc77d6f Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 1 Oct 2025 13:59:39 +0100 Subject: [PATCH 08/81] Comment --- crates/bitcoin-da/src/service.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 919187a4f7..be002d0871 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -66,10 +66,10 @@ use crate::REVEAL_OUTPUT_AMOUNT; pub(crate) type Result = std::result::Result; -const POLLING_INTERVAL: u64 = 10; // seconds +const POLLING_INTERVAL: u64 = 10; // 10 seconds -const DEFAULT_FEE_RATE_CAP_DURATION_SECS: u64 = 3600; // 1hour default cap duration -const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15; +const DEFAULT_FEE_RATE_CAP_DURATION_SECS: u64 = 3600; // 1 hour default cap duration +const DEFAULT_MAX_FEE_RATE_SAT_VB: u64 = 15; // 15sat/vb default max fee rate /// Map sov Network to Bitcoin Network. pub fn network_to_bitcoin_network(network: &Network) -> bitcoin::Network { From 23a4ee89e9583fdf2702854b018a3cfee37069d6 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Thu, 2 Oct 2025 14:04:10 +0100 Subject: [PATCH 09/81] Rename to avoid confusion with sequencer config --- bin/citrea/tests/bitcoin/utils.rs | 2 +- crates/bitcoin-da/src/service.rs | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index e1fa05b6db..4ef2085a49 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -188,7 +188,7 @@ pub async fn spawn_bitcoin_da_service( utxo_selection_mode, rpc_timeout_secs: None, rpc_connect_timeout_secs: None, - max_fee_rate_sat_vb: None, + max_fee_rate_sat_to_pay: None, fee_rate_cap_duration_secs: None, }; diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index be002d0871..42d8db5613 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -129,7 +129,7 @@ pub struct BitcoinServiceConfig { pub rpc_connect_timeout_secs: Option, /// Max fee rate in sat/vb - pub max_fee_rate_sat_vb: Option, + pub max_fee_rate_sat_to_pay: Option, /// Fee rate cap duration in seconds pub fee_rate_cap_duration_secs: Option, @@ -158,7 +158,7 @@ impl citrea_common::FromEnv for BitcoinServiceConfig { rpc_connect_timeout_secs: read_env("BITCOIN_RPC_CONNECT_TIMEOUT_SECS") .ok() .and_then(|v| v.parse::().ok()), - max_fee_rate_sat_vb: read_env("BITCOIN_MAX_FEE_RATE_SAT_VB") + max_fee_rate_sat_to_pay: read_env("BITCOIN_MAX_FEE_RATE_SAT_TO_PAY") .ok() .and_then(|v| v.parse::().ok()), fee_rate_cap_duration_secs: read_env("BITCOIN_FEE_RATE_CAP_DURATION_SECS") @@ -185,7 +185,7 @@ pub struct BitcoinService { tx_queue: Arc>>, pub(crate) tx_signer: TxSigner, utxo_selection_mode: UtxoSelectionMode, - max_fee_rate_sat_vb: u64, + max_fee_rate_sat_to_pay: u64, fee_rate_cap_duration_secs: u64, } @@ -202,7 +202,7 @@ impl BitcoinService { reveal_tx_prefix: Vec, tx_backup_dir: PathBuf, utxo_selection_mode: UtxoSelectionMode, - max_fee_rate_sat_vb: u64, + max_fee_rate_sat_to_pay: u64, fee_rate_cap_duration_secs: u64, ) -> Self { Self { @@ -221,7 +221,7 @@ impl BitcoinService { ))), tx_queue: Arc::new(Mutex::new(VecDeque::new())), 
utxo_selection_mode, - max_fee_rate_sat_vb, + max_fee_rate_sat_to_pay, fee_rate_cap_duration_secs, } } @@ -263,8 +263,8 @@ impl BitcoinService { .map_err(|_| BitcoinServiceError::InvalidPrivateKey)?; let utxo_selection_mode = config.utxo_selection_mode.clone().unwrap_or_default(); - let max_fee_rate_sat_vb = config - .max_fee_rate_sat_vb + let max_fee_rate_sat_to_pay = config + .max_fee_rate_sat_to_pay .unwrap_or(DEFAULT_MAX_FEE_RATE_SAT_VB); let fee_rate_cap_duration_secs = config .fee_rate_cap_duration_secs @@ -280,7 +280,7 @@ impl BitcoinService { chain_params.reveal_tx_prefix, tx_backup_dir.to_path_buf(), utxo_selection_mode, - max_fee_rate_sat_vb, + max_fee_rate_sat_to_pay, fee_rate_cap_duration_secs, )) } @@ -327,22 +327,22 @@ impl BitcoinService { } }; - // Cap fee at self.max_fee_rate_sat_vb for a maximum of `self.fee_rate_cap_duration_secs`. - // If `self.fee_rate_cap_duration_secs` is exceeded, send transaction with fee rate above `self.max_fee_rate_sat_vb` anyway + // Cap fee at self.max_fee_rate_sat_to_pay for a maximum of `self.fee_rate_cap_duration_secs`. + // If `self.fee_rate_cap_duration_secs` is exceeded, send transaction with fee rate above `self.max_fee_rate_sat_to_pay` anyway let elapsed = start.elapsed().as_secs(); - if fee_sat_per_vbyte > self.max_fee_rate_sat_vb + if fee_sat_per_vbyte > self.max_fee_rate_sat_to_pay && elapsed < self.fee_rate_cap_duration_secs { - warn!("Fee rate {} sat/vb above cap of {}. Waiting (elapsed: {}s / max: {}s)", fee_sat_per_vbyte, self.max_fee_rate_sat_vb, elapsed, self.fee_rate_cap_duration_secs); + warn!("Fee rate {} sat/vb above cap of {}. Waiting (elapsed: {}s / max: {}s)", fee_sat_per_vbyte, self.max_fee_rate_sat_to_pay, elapsed, self.fee_rate_cap_duration_secs); tokio::time::sleep(Duration::from_secs(10)).await; continue; } - if fee_sat_per_vbyte > self.max_fee_rate_sat_vb + if fee_sat_per_vbyte > self.max_fee_rate_sat_to_pay && elapsed >= self.fee_rate_cap_duration_secs { warn!( "Fee rate {} sat/vb above cap of {} sat/vb, but cap duration of {}s exceeded. 
Sending transaction anyway", - fee_sat_per_vbyte, self.max_fee_rate_sat_vb, self.fee_rate_cap_duration_secs + fee_sat_per_vbyte, self.max_fee_rate_sat_to_pay, self.fee_rate_cap_duration_secs ); } From eeb202fb9b7de6004dcc0d61624c89be52271ba0 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 8 Oct 2025 11:24:39 +0100 Subject: [PATCH 10/81] DA job service initial implementation with test passing --- Cargo.lock | 4 + bin/citrea/src/main.rs | 1 + bin/citrea/src/rollup/bitcoin.rs | 13 +- bin/citrea/src/rollup/mock.rs | 1 + bin/citrea/src/rollup/mod.rs | 2 + bin/citrea/tests/bitcoin/batch_prover_test.rs | 2 +- bin/citrea/tests/bitcoin/bitcoin_service.rs | 2 +- bin/citrea/tests/bitcoin/bitcoin_test.rs | 178 +++--- bin/citrea/tests/bitcoin/bitcoin_verifier.rs | 2 +- bin/citrea/tests/bitcoin/da_queue.rs | 95 ++-- bin/citrea/tests/bitcoin/full_node.rs | 180 ++---- bin/citrea/tests/bitcoin/light_client_test.rs | 365 +++++------- .../tests/bitcoin/sequencer_commitments.rs | 7 +- bin/citrea/tests/bitcoin/utils.rs | 12 +- bin/citrea/tests/common/helpers.rs | 1 + bin/citrea/tests/mock/proving.rs | 2 + crates/batch-prover/src/prover.rs | 24 +- crates/batch-prover/src/rpc.rs | 11 +- crates/bitcoin-da/Cargo.toml | 6 + crates/bitcoin-da/src/error.rs | 12 +- .../src/helpers/builders/body_builders.rs | 71 ++- .../bitcoin-da/src/helpers/builders/chunks.rs | 385 +++++++++++++ crates/bitcoin-da/src/helpers/builders/mod.rs | 1 + .../bitcoin-da/src/helpers/builders/tests.rs | 2 + crates/bitcoin-da/src/helpers/mod.rs | 12 +- crates/bitcoin-da/src/job/error.rs | 35 ++ crates/bitcoin-da/src/job/mod.rs | 12 + crates/bitcoin-da/src/job/rpc.rs | 1 + crates/bitcoin-da/src/job/service.rs | 295 ++++++++++ crates/bitcoin-da/src/lib.rs | 3 + crates/bitcoin-da/src/monitoring.rs | 33 +- crates/bitcoin-da/src/service.rs | 526 ++++++++++-------- crates/bitcoin-da/src/test_utils.rs | 4 +- crates/bitcoin-da/src/tx_signer.rs | 28 +- crates/prover-services/src/parallel.rs | 21 +- crates/prover-services/tests/prover_tests.rs | 4 +- crates/sequencer/src/commitment/service.rs | 26 +- .../sovereign-sdk/adapters/mock-da/Cargo.toml | 3 + .../adapters/mock-da/src/service.rs | 31 +- .../full-node/db/sov-db/src/ledger_db/mod.rs | 50 +- .../db/sov-db/src/ledger_db/traits.rs | 16 + .../full-node/db/sov-db/src/schema/tables.rs | 16 + .../sov-modules-rollup-blueprint/src/lib.rs | 1 + .../rollup-interface/src/node/services/da.rs | 22 +- 44 files changed, 1661 insertions(+), 857 deletions(-) create mode 100644 crates/bitcoin-da/src/helpers/builders/chunks.rs create mode 100644 crates/bitcoin-da/src/job/error.rs create mode 100644 crates/bitcoin-da/src/job/mod.rs create mode 100644 crates/bitcoin-da/src/job/rpc.rs create mode 100644 crates/bitcoin-da/src/job/service.rs diff --git a/Cargo.lock b/Cargo.lock index 306a22a998..e3974eb2fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1589,6 +1589,7 @@ dependencies = [ "anyhow", "async-trait", "backoff", + "bincode", "bitcoin", "bitcoincore-rpc", "borsh", @@ -1610,10 +1611,12 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", + "sov-db", "sov-rollup-interface", "thiserror 2.0.12", "tokio", "tracing", + "uuid", ] [[package]] @@ -9983,6 +9986,7 @@ dependencies = [ "tempfile", "tokio", "tracing", + "uuid", ] [[package]] diff --git a/bin/citrea/src/main.rs b/bin/citrea/src/main.rs index c9a517e3df..0a258f5193 100644 --- a/bin/citrea/src/main.rs +++ b/bin/citrea/src/main.rs @@ -203,6 +203,7 @@ where matches!(node_type, NodeWithConfig::Sequencer(_)) || 
matches!(node_type, NodeWithConfig::BatchProver(_)),
         network,
+        ledger_db.clone(),
     )
     .await?;
diff --git a/bin/citrea/src/rollup/bitcoin.rs b/bin/citrea/src/rollup/bitcoin.rs
index 2675264d81..45a2e7d3f1 100644
--- a/bin/citrea/src/rollup/bitcoin.rs
+++ b/bin/citrea/src/rollup/bitcoin.rs
@@ -7,9 +7,7 @@ use bitcoin_da::fee::FeeService;
 use bitcoin_da::monitoring::MonitoringService;
 use bitcoin_da::network_constants::get_network_constants;
 use bitcoin_da::rpc::create_rpc_module as create_da_rpc_module;
-use bitcoin_da::service::{
-    network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig, TxidWrapper,
-};
+use bitcoin_da::service::{network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig};
 use bitcoin_da::spec::{BitcoinSpec, RollupParams};
 use bitcoin_da::verifier::BitcoinVerifier;
 use bitcoincore_rpc::{Auth, Client};
@@ -29,9 +27,7 @@ use sov_modules_api::default_context::DefaultContext;
 use sov_modules_api::{Address, SpecId, Zkvm};
 use sov_modules_rollup_blueprint::RollupBlueprint;
 use sov_prover_storage_manager::ProverStorageManager;
-use sov_rollup_interface::services::da::TxRequestWithNotifier;
 use sov_state::ProverStorage;
-use tokio::sync::mpsc::unbounded_channel;
 use tracing::instrument;
 use crate::guests::{
@@ -106,9 +102,8 @@ impl RollupBlueprint for BitcoinRollup {
         require_wallet_check: bool,
         task_executor: TaskExecutor,
         network: Network,
+        ledger_db: LedgerDB,
     ) -> Result<Arc<Self::DaService>, anyhow::Error> {
-        let (tx, rx) = unbounded_channel::<TxRequestWithNotifier<TxidWrapper>>();
-
         let chain_params = RollupParams {
             reveal_tx_prefix: REVEAL_TX_PREFIX.to_vec(),
             network,
@@ -154,7 +149,7 @@ impl RollupBlueprint for BitcoinRollup {
                 monitoring_service,
                 fee_service,
                 require_wallet_check,
-                tx,
+                ledger_db,
             )
             .await?,
         );
@@ -166,7 +161,7 @@ impl RollupBlueprint for BitcoinRollup {
         service.monitoring.restore().await?;
         task_executor.spawn_with_graceful_shutdown_signal(|tk| {
-            Arc::clone(&service).run_da_queue(rx, block_rx, tk)
+            Arc::clone(&service).run_da_queue(block_rx, tk)
         });
         task_executor
             .spawn_with_graceful_shutdown_signal(|tk| Arc::clone(&service.monitoring).run(tk));
diff --git a/bin/citrea/src/rollup/mock.rs b/bin/citrea/src/rollup/mock.rs
index 9616b2e742..59e0a16d69 100644
--- a/bin/citrea/src/rollup/mock.rs
+++ b/bin/citrea/src/rollup/mock.rs
@@ -70,6 +70,7 @@ impl RollupBlueprint for MockDemoRollup {
         _require_wallet_check: bool,
         _task_manager: TaskExecutor,
         _network: Network,
+        _ledger_db: LedgerDB,
     ) -> Result<Arc<Self::DaService>, anyhow::Error> {
         Ok(Arc::new(MockDaService::new(
             rollup_config.da.sender_address.clone(),
diff --git a/bin/citrea/src/rollup/mod.rs b/bin/citrea/src/rollup/mod.rs
index 681b430a05..73444d8e99 100644
--- a/bin/citrea/src/rollup/mod.rs
+++ b/bin/citrea/src/rollup/mod.rs
@@ -77,6 +77,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint {
         rollup_config: &FullNodeConfig,
         require_da_wallet: bool,
         network: Network,
+        ledger_db: LedgerDB,
     ) -> Result> {
         let task_manager = TaskManager::current();
         let da_service = self
@@ -85,6 +86,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint {
                 require_da_wallet,
                 task_manager.executor(),
                 network,
+                ledger_db,
             )
             .await?;
         let (l2_block_tx, l2_block_rx) = broadcast::channel(10);
diff --git a/bin/citrea/tests/bitcoin/batch_prover_test.rs b/bin/citrea/tests/bitcoin/batch_prover_test.rs
index 7dc5850b21..f72137fb2b 100644
--- a/bin/citrea/tests/bitcoin/batch_prover_test.rs
+++ b/bin/citrea/tests/bitcoin/batch_prover_test.rs
@@ -311,7 +311,7 @@ async fn basic_prover_test() -> Result<()> {
 // // Send the same commitment that was already proven. 
// bitcoin_da_service -// .send_transaction_with_fee_rate( +// .send_transaction( // DaTxRequest::SequencerCommitment(commitments.first().unwrap().clone()), // 1, // ) diff --git a/bin/citrea/tests/bitcoin/bitcoin_service.rs b/bin/citrea/tests/bitcoin/bitcoin_service.rs index 00f35b8462..d40f2502fc 100644 --- a/bin/citrea/tests/bitcoin/bitcoin_service.rs +++ b/bin/citrea/tests/bitcoin/bitcoin_service.rs @@ -162,7 +162,7 @@ impl TestCase for BitcoinServiceTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_bitcoin_service() -> Result<()> { TestCaseRunner::new(BitcoinServiceTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/bitcoin_test.rs b/bin/citrea/tests/bitcoin/bitcoin_test.rs index 4d736d59f4..b5d2a84530 100644 --- a/bin/citrea/tests/bitcoin/bitcoin_test.rs +++ b/bin/citrea/tests/bitcoin/bitcoin_test.rs @@ -3,13 +3,13 @@ use std::time::Duration; use anyhow::bail; use async_trait::async_trait; use bitcoin::hashes::Hash; -use bitcoin::{Amount, Txid}; +use bitcoin::Txid; use bitcoin_da::monitoring::TxStatus; use bitcoin_da::rpc::DaRpcClient; -use bitcoincore_rpc::{Client, RpcApi}; +use bitcoincore_rpc::RpcApi; use citrea_batch_prover::rpc::BatchProverRpcClient; use citrea_e2e::bitcoin::{BitcoinNode, DEFAULT_FINALITY_DEPTH}; -use citrea_e2e::config::{BitcoinConfig, TestCaseConfig}; +use citrea_e2e::config::TestCaseConfig; use citrea_e2e::framework::TestFramework; use citrea_e2e::test_case::{TestCase, TestCaseRunner}; use citrea_e2e::traits::Restart; @@ -489,89 +489,89 @@ async fn test_cpfp_fee_bump() -> Result<()> { .await } -struct MinRelayFeeTest; - -impl MinRelayFeeTest { - async fn drain_wallet( - &self, - da: &BitcoinNode, - client: &Client, - amount_to_keep: Amount, - ) -> Result<()> { - let balance = da.get_balance(None, None).await?; - - let amount_to_send = balance - amount_to_keep; - - if amount_to_send <= Amount::ZERO { - return Ok(()); - } - - let drain_address = da.get_new_address(None, None).await?.assume_checked(); - - client - .send_to_address( - &drain_address, - amount_to_send, - None, - None, - None, - None, - None, - None, - ) - .await?; - da.generate(1).await?; - - Ok(()) - } -} - -#[async_trait] -impl TestCase for MinRelayFeeTest { - fn test_config() -> TestCaseConfig { - TestCaseConfig { - with_sequencer: true, - with_batch_prover: false, - ..Default::default() - } - } - - fn bitcoin_config() -> BitcoinConfig { - BitcoinConfig { - extra_args: vec!["-fallbackfee=0.00001", "-minrelaytxfee=0.00002"], - ..Default::default() - } - } - - async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { - let da = f.bitcoin_nodes.get(0).unwrap(); - let sequencer = f.sequencer.as_mut().unwrap(); - - self.drain_wallet(da, &sequencer.da, Amount::from_sat(8000)) - .await?; - - let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); - - // Generate seqcommitments - for _ in 0..max_l2_blocks_per_commitment { - sequencer.client.send_publish_batch_request().await?; - } - - da.wait_mempool_len(2, None).await?; - - // Assert that we hit MinRelayFeeNotMet error but recover and end up sending the tx by increasing fee_rate_multiplier - let sequencer_stdout = - std::fs::read_to_string(sequencer.config.base.dir.join("stdout.log"))?; - assert!(sequencer_stdout.contains("MinRelayFeeNotMet")); - - Ok(()) - } -} - -#[tokio::test] -async fn test_min_relay_fee_handling() -> Result<()> { - TestCaseRunner::new(MinRelayFeeTest) - .set_citrea_path(get_citrea_path()) - .run() - .await -} +// struct MinRelayFeeTest; 
+ +// impl MinRelayFeeTest { +// async fn drain_wallet( +// &self, +// da: &BitcoinNode, +// client: &Client, +// amount_to_keep: Amount, +// ) -> Result<()> { +// let balance = da.get_balance(None, None).await?; + +// let amount_to_send = balance - amount_to_keep; + +// if amount_to_send <= Amount::ZERO { +// return Ok(()); +// } + +// let drain_address = da.get_new_address(None, None).await?.assume_checked(); + +// client +// .send_to_address( +// &drain_address, +// amount_to_send, +// None, +// None, +// None, +// None, +// None, +// None, +// ) +// .await?; +// da.generate(1).await?; + +// Ok(()) +// } +// } + +// #[async_trait] +// impl TestCase for MinRelayFeeTest { +// fn test_config() -> TestCaseConfig { +// TestCaseConfig { +// with_sequencer: true, +// with_batch_prover: false, +// ..Default::default() +// } +// } + +// fn bitcoin_config() -> BitcoinConfig { +// BitcoinConfig { +// extra_args: vec!["-fallbackfee=0.00001", "-minrelaytxfee=0.00002"], +// ..Default::default() +// } +// } + +// async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { +// let da = f.bitcoin_nodes.get(0).unwrap(); +// let sequencer = f.sequencer.as_mut().unwrap(); + +// self.drain_wallet(da, &sequencer.da, Amount::from_sat(8000)) +// .await?; + +// let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); + +// // Generate seqcommitments +// for _ in 0..max_l2_blocks_per_commitment { +// sequencer.client.send_publish_batch_request().await?; +// } + +// da.wait_mempool_len(2, None).await?; + +// // Assert that we hit MinRelayFeeNotMet error but recover and end up sending the tx by increasing fee_rate_multiplier +// let sequencer_stdout = +// std::fs::read_to_string(sequencer.config.base.dir.join("stdout.log"))?; +// assert!(sequencer_stdout.contains("MinRelayFeeNotMet")); + +// Ok(()) +// } +// } + +// #[tokio::test] +// async fn test_min_relay_fee_handling() -> Result<()> { +// TestCaseRunner::new(MinRelayFeeTest) +// .set_citrea_path(get_citrea_path()) +// .run() +// .await +// } diff --git a/bin/citrea/tests/bitcoin/bitcoin_verifier.rs b/bin/citrea/tests/bitcoin/bitcoin_verifier.rs index a97674338b..978d8e88b7 100644 --- a/bin/citrea/tests/bitcoin/bitcoin_verifier.rs +++ b/bin/citrea/tests/bitcoin/bitcoin_verifier.rs @@ -744,7 +744,7 @@ impl BitcoinVerifierTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_bitcoin_verifier() -> Result<()> { TestCaseRunner::new(BitcoinVerifierTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index a0beb3ec7d..913c58ef73 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -63,19 +63,13 @@ impl DaTransactionQueueingTest { // Fill mempool for i in 1..=3 { da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; da.wait_mempool_len(8 * i, None).await?; } da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit @@ -83,30 +77,22 @@ impl DaTransactionQueueingTest { da.wait_mempool_len(8 * 3 + 2, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 26); - // Assert that all queued txs are monitored + // 
Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 32); + assert_eq!(monitored_txs.len(), 26); // Try to send when queue is already filled up. // This is to test that utxos is correctly selected and that it's doesn't hang on waiting for list of queued txids to be returned let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await; - assert!(matches!(res, Err(BitcoinServiceError::QueueNotEmpty))); + assert!(matches!( + res, + Err(BitcoinServiceError::PreviousJobInProgress) + )); - // Send transaction hangs until a new block is detected - // Tests that transactions properly waits for block notification - tokio::select! { - _ = tokio::time::sleep(std::time::Duration::from_secs(2)) => { - da.generate(1).await?; - } - _ = da_service.send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) => { - } - } + da.generate(1).await?; // We mine the first three proofs + the 1 chunk pair and make sure that the remaining chunks and aggregate // and the extra proof is properly queued and sent on next block when mempool size is freed @@ -120,6 +106,14 @@ impl DaTransactionQueueingTest { assert_eq!(relevant_txs.len(), 13); + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + // Send additional proof and make sure it doesn't hit PreviousJobInProgress error + let res = da_service + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) + .await; + + assert!(res.is_ok()); + // Remaining chunks and aggregate + extra queued proof should now hit the mempool da.wait_mempool_len(8 + 6, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 8 + 6); @@ -170,25 +164,22 @@ impl DaTransactionQueueingTest { // This over the mempool limit proof should be accepted and split up over multiple blocks let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone())) .await; assert!(res.is_ok()); // Queue is already not empty and proof cannot be sent. 
let res = da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_400kb_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof)) .await; assert!(res.is_err()); da.wait_mempool_len(18, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 18); - // Assert that all queued txs are monitored + // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 64); + assert_eq!(monitored_txs.len(), 58); da.generate(1).await?; // Assert that all chunks were mined and mempool space is freed @@ -380,7 +371,7 @@ impl TestCase for DaTransactionQueueingTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_queue_da_transactions() -> Result<()> { TestCaseRunner::new(DaTransactionQueueingTest { task_manager: TaskManager::current(), @@ -429,19 +420,13 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { // Fill mempool for i in 1..=3 { da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; da.wait_mempool_len(8 * i, None).await?; } da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit @@ -449,23 +434,20 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { da.wait_mempool_len(8 * 3 + 2, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 26); - // Assert that all queued txs are monitored + // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 32); + assert_eq!(monitored_txs.len(), 26); // Try to send when queue is already filled up. 
// This is to test that utxos is correctly selected and that it's doesn't hang on waiting for list of queued txids to be returned let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await; assert!(res.is_ok()); let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 40); + assert_eq!(monitored_txs.len(), 34); // Txs starting from a new chain should be accepted to mempool da.wait_mempool_len(8 * 3 + 2 + 8, None).await?; @@ -533,27 +515,25 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { // This over the mempool limit proof should be accepted and split up over multiple blocks let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone()), - 1, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone())) .await; assert!(res.is_ok()); // Should be able to send another proof that is also split up over multiple blocks let res = da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_400kb_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof)) .await; assert!(res.is_ok()); da.wait_mempool_len(18 * 2, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 18 * 2); - // Assert that all queued txs are monitored + // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 88); + assert_eq!(monitored_txs.len(), 76); da.generate(1).await?; + // Assert that all chunks were mined and mempool space is freed assert_eq!(da.get_raw_mempool().await?.len(), 0); @@ -567,8 +547,10 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { let rollback_first_hash = hash; da.wait_mempool_len(6 * 2, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 6 * 2); da.generate(1).await?; + // Assert that all chunks and aggregate were mined assert_eq!(da.get_raw_mempool().await?.len(), 0); @@ -589,6 +571,7 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { let dropped_txs = &da.get_raw_mempool().await?[2..]; da.invalidate_block(&rollback_first_hash).await?; + // Should be (6 + 18) * 2 if all mined txs were restored to mempool but 5 * 2 txs are dropped due to being over mempool policy limit assert_eq!(da.get_raw_mempool().await?.len(), (18 + 1) * 2); let remaining_txs = da.get_raw_mempool().await?; @@ -599,6 +582,7 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { // Make sure txs are rebroadcasted from monitoring service da.wait_mempool_len(5 * 2, None).await?; + let raw_mempool = da.get_raw_mempool().await?; assert_eq!(dropped_txs, raw_mempool); @@ -696,6 +680,7 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest { } da.wait_mempool_len(2, None).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; let finalized_height = da.get_finalized_height(None).await?; @@ -749,7 +734,7 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_queue_da_transactions_oldest_mode() -> Result<()> { TestCaseRunner::new(DaTransactionQueueingUtxoSelectionModeOldestTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/full_node.rs b/bin/citrea/tests/bitcoin/full_node.rs index c442101007..11ed882b04 100644 --- a/bin/citrea/tests/bitcoin/full_node.rs +++ 
b/bin/citrea/tests/bitcoin/full_node.rs @@ -23,6 +23,7 @@ use sov_ledger_rpc::LedgerRpcClient; use sov_modules_api::BatchProofCircuitOutputV3; use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; use sov_rollup_interface::rpc::block::L2BlockResponse; +use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use tokio::time::sleep; @@ -156,7 +157,7 @@ impl TestCase for PreStateRootMismatchTest { // Send the first proof prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof), 1) + .send_transaction(DaTxRequest::ZKProof(proof)) .await .unwrap(); @@ -228,7 +229,7 @@ impl TestCase for PreStateRootMismatchTest { // Send the invalid proof prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(invalid_proof), 1) + .send_transaction(DaTxRequest::ZKProof(invalid_proof)) .await .unwrap(); @@ -278,7 +279,7 @@ impl TestCase for PreStateRootMismatchTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_pre_state_root_mismatch() -> Result<()> { TestCaseRunner::new(PreStateRootMismatchTest { task_manager: TaskManager::current(), @@ -374,10 +375,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { // Send the `correct_commitment` so it's stored and will trigger the pre-hash mismatch against `wrong_commitment` sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(correct_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(correct_commitment.clone())) .await .unwrap(); @@ -441,7 +439,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { None, ); prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(fake_proof), 1) + .send_transaction(DaTxRequest::ZKProof(fake_proof)) .await .unwrap(); @@ -475,7 +473,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_sequencer_commitment_hash_mismatch() -> Result<()> { TestCaseRunner::new(SequencerCommitmentHashMismatchTest { task_manager: TaskManager::current(), @@ -537,10 +535,9 @@ impl TestCase for PendingCommitmentHaltingErrorTest { }; bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(wrong_merkle_root_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + wrong_merkle_root_commitment.clone(), + )) .await .unwrap(); @@ -609,7 +606,7 @@ impl TestCase for PendingCommitmentHaltingErrorTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_halting_pending_commitment_merkle_root_mismatch() -> Result<()> { TestCaseRunner::new(PendingCommitmentHaltingErrorTest { task_manager: TaskManager::current(), @@ -1001,10 +998,9 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the zero index commitment first, should be ignored bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(zero_index_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + zero_index_commitment.clone(), + )) .await .unwrap(); @@ -1027,10 +1023,7 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the second commitment first bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(second_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(second_commitment.clone())) .await .unwrap(); @@ -1054,10 +1047,7 @@ impl TestCase for 
OutOfOrderCommitmentsTest { // Send the first commitment bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(first_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(first_commitment.clone())) .await .unwrap(); @@ -1090,7 +1080,7 @@ impl TestCase for OutOfOrderCommitmentsTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_out_of_order_commitments() -> Result<()> { TestCaseRunner::new(OutOfOrderCommitmentsTest { task_manager: TaskManager::current(), @@ -1187,10 +1177,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send commitment A bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_a.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment_a.clone())) .await .unwrap(); @@ -1212,10 +1199,9 @@ impl TestCase for ConflictingCommitmentsTest { // Send conflicting commitment with different merkle root, should be ignored bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(conflicting_commitment_different_root.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + conflicting_commitment_different_root.clone(), + )) .await .unwrap(); @@ -1238,10 +1224,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send conflicting commitment B bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_b.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment_b.clone())) .await .unwrap(); @@ -1286,10 +1269,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send commitment C that follows A bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_c.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment_c.clone())) .await .unwrap(); @@ -1316,7 +1296,7 @@ impl TestCase for ConflictingCommitmentsTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_conflicting_commitments() -> Result<()> { TestCaseRunner::new(ConflictingCommitmentsTest { task_manager: TaskManager::current(), @@ -1584,7 +1564,7 @@ impl TestCase for OutOfRangeProofTest { // Send the proof first. 
It should be discard as none of its commitments exist prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof1.clone()), 1) + .send_transaction(DaTxRequest::ZKProof(proof1.clone())) .await .unwrap(); @@ -1605,10 +1585,7 @@ impl TestCase for OutOfRangeProofTest { ); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); @@ -1637,10 +1614,7 @@ impl TestCase for OutOfRangeProofTest { assert!(proven_height.is_none(), "Proof should have been discarded"); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); @@ -1713,18 +1687,12 @@ impl TestCase for OutOfRangeProofTest { full_node.start(None, None).await?; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); @@ -1747,7 +1715,7 @@ impl TestCase for OutOfRangeProofTest { // Send the proof first. It should be processed as its commitments exist prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof1), 1) + .send_transaction(DaTxRequest::ZKProof(proof1)) .await .unwrap(); @@ -1769,18 +1737,12 @@ impl TestCase for OutOfRangeProofTest { // Send commitments for proof 2 and proof 3 sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment4.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment4.clone())) .await .unwrap(); @@ -1833,7 +1795,7 @@ impl TestCase for OutOfRangeProofTest { ); // Send the third proof first. It should be set as pending as its commitments exist but it's starting commitment index is not proven proof last commitment index + 1 prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof3), 1) + .send_transaction(DaTxRequest::ZKProof(proof3)) .await .unwrap(); @@ -1892,7 +1854,7 @@ impl TestCase for OutOfRangeProofTest { // Now send the second proof. 
It should be processed and trigger a processing of pending proof3 prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof2), 1) + .send_transaction(DaTxRequest::ZKProof(proof2)) .await .unwrap(); @@ -1935,7 +1897,7 @@ impl TestCase for OutOfRangeProofTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_out_of_range_proof() -> Result<()> { TestCaseRunner::new(OutOfRangeProofTest { task_manager: TaskManager::current(), @@ -2128,10 +2090,7 @@ impl TestCase for OverlappingProofRangesTest { full_node.start(None, None).await?; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); @@ -2162,18 +2121,12 @@ impl TestCase for OverlappingProofRangesTest { .state_root; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); @@ -2276,34 +2229,22 @@ impl TestCase for OverlappingProofRangesTest { // Send all 4 commitments in order sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment4.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment4.clone())) .await .unwrap(); @@ -2349,7 +2290,7 @@ impl TestCase for OverlappingProofRangesTest { // Send proof_a over commitments [1,2,3] prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof_a.clone()), 1) + .send_transaction(DaTxRequest::ZKProof(proof_a.clone())) .await .unwrap(); @@ -2436,7 +2377,7 @@ impl TestCase for OverlappingProofRangesTest { // Send proof_b with overlapping range of [2,3,4] prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof_b.clone()), 1) + .send_transaction(DaTxRequest::ZKProof(proof_b.clone())) .await .unwrap(); @@ -2500,7 +2441,7 @@ impl TestCase for OverlappingProofRangesTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_overlapping_proof_ranges() -> Result<()> { TestCaseRunner::new(OverlappingProofRangesTest { task_manager: TaskManager::current(), @@ -2641,10 +2582,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { }; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_1.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment_1.clone())) .await .unwrap(); @@ -2687,10 +2625,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { /*------- */ sequencer_da_service - .send_transaction_with_fee_rate( 
- DaTxRequest::SequencerCommitment(commitment_2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment_2.clone())) .await .unwrap(); @@ -2732,10 +2667,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { /*------- */ sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment(commitment_3.clone())) .await .unwrap(); @@ -2920,7 +2852,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_unsynced_commitment_l2_range_test() -> Result<()> { TestCaseRunner::new(UnsyncedCommitmentL2RangeTest { task_manager: TaskManager::current(), @@ -3654,7 +3586,8 @@ impl TestCase for FullNodeLcpChunkProofTest { Ok(()) } } -#[tokio::test] + +#[tokio::test(flavor = "multi_thread")] async fn test_full_node_lcp_chunk_proof() -> Result<()> { TestCaseRunner::new(FullNodeLcpChunkProofTest { task_manager: TaskManager::current(), @@ -3737,7 +3670,7 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { .await; sequencer_da_service - .send_transaction_with_fee_rate(DaTxRequest::SequencerCommitment(correct_commitment), 1) + .send_transaction(DaTxRequest::SequencerCommitment(correct_commitment)) .await .unwrap(); @@ -3762,10 +3695,9 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { }; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(wrong_merkle_root_commitment), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + wrong_merkle_root_commitment, + )) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -3833,7 +3765,7 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_full_node_l1_sync_halt_on_merkle_root_mismatch() -> Result<()> { TestCaseRunner::new(FullNodeL1SyncHaltOnMerkleRootMismatch { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/light_client_test.rs b/bin/citrea/tests/bitcoin/light_client_test.rs index 08506c5535..c793818936 100644 --- a/bin/citrea/tests/bitcoin/light_client_test.rs +++ b/bin/citrea/tests/bitcoin/light_client_test.rs @@ -678,13 +678,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateTest { let signatures_with_index = create_valid_signatures(&signers, &prehash); bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body, - signatures_with_index, - }), - 1, - ) + .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body, + signatures_with_index, + })) .await .unwrap(); @@ -763,7 +760,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_light_client_batch_proof_method_id_update() -> Result<()> { TestCaseRunner::new(LightClientBatchProofMethodIdUpdateTest { task_manager: TaskManager::current(), @@ -905,13 +902,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { let signatures_with_index = create_valid_signatures(&signers, &prehash); bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body.clone(), - signatures_with_index, - }), - 1, - ) + .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body.clone(), + signatures_with_index, + })) .await .unwrap(); 
da.wait_mempool_len(2, None).await?; @@ -945,13 +939,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index[0].0[0] ^= 0xFF; bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body2.clone(), - signatures_with_index, - }), - 1, - ) + .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body2.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -985,13 +976,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { // Corrupt one signature signatures_with_index[0].1 = signatures_with_index[2].1; bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body3.clone(), - signatures_with_index, - }), - 1, - ) + .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body3.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1024,13 +1012,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { // Corrupt one signature signatures_with_index[2].1 = 5; // out of bounds bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body3.clone(), - signatures_with_index, - }), - 1, - ) + .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body3.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1067,13 +1052,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index[2].1 = tmp; bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body3.clone(), - signatures_with_index, - }), - 1, - ) + .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body3.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1096,7 +1078,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_light_client_batch_proof_method_id_update_security_council() -> Result<()> { TestCaseRunner::new(LightClientBatchProofMethodIdUpdateSecurityCouncilTest { task_manager: TaskManager::current(), @@ -1188,10 +1170,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -1202,10 +1183,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_2.clone(), + )) .await .unwrap(); @@ -1216,10 +1196,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_3.clone(), + 
)) .await .unwrap(); @@ -1230,10 +1209,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_4.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_4.clone(), + )) .await .unwrap(); @@ -1253,7 +1231,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { None, ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1268,7 +1246,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_2.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1284,7 +1262,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_3.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unparsable_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(unparsable_batch_proof)) .await .unwrap(); @@ -1299,7 +1277,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1316,7 +1294,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_3.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(unverifiable_batch_proof)) .await .unwrap(); @@ -1352,7 +1330,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_light_client_unverifiable_batch_proof() -> Result<()> { TestCaseRunner::new(LightClientUnverifiableBatchProofTest { task_manager: TaskManager::current(), @@ -1424,10 +1402,9 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -1438,10 +1415,9 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -1452,10 +1428,9 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment3.clone(), + )) .await .unwrap(); @@ -1508,7 +1483,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { ); let _ = bitcoin_da_service - 
.send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_100kb_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof)) .await .unwrap(); @@ -1569,7 +1544,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_130kb_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_130kb_batch_proof)) .await .unwrap(); @@ -1681,7 +1656,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { Some(fake_sequencer_commitment2.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_100kb_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(unverifiable_100kb_batch_proof)) .await .unwrap(); @@ -1723,7 +1698,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_verify_chunked_txs_in_light_client() -> Result<()> { TestCaseRunner::new(VerifyChunkedTxsInLightClient { task_manager: TaskManager::current(), @@ -1796,10 +1771,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -1810,10 +1784,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -1824,10 +1797,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment3.clone(), + )) .await .unwrap(); @@ -1838,10 +1810,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment4.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment4.clone(), + )) .await .unwrap(); @@ -1919,39 +1890,24 @@ impl TestCase for UnchainedBatchProofsTest { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); - let mut txs = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp1), 1) + bitcoin_da_service + .send_transaction(DaTxRequest::ZKProof(bp1)) .await .unwrap(); - txs.extend( - bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp2), 1) - .await - .unwrap(), - ); + bitcoin_da_service + .send_transaction(DaTxRequest::ZKProof(bp2)) + .await + .unwrap(); - txs.extend( - bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp3), 1) - .await - .unwrap(), - ); + bitcoin_da_service + .send_transaction(DaTxRequest::ZKProof(bp3)) + .await + .unwrap(); da.wait_mempool_len(6, None).await?; - da.generate_block( - da.get_new_address(None, None) - .await? 
- .assume_checked() - .to_string(), - txs.into_iter() - .flat_map(|tx| [tx[0].id.to_string(), tx[1].id.to_string()]) - .collect(), - ) - .await?; - - da.generate(DEFAULT_FINALITY_DEPTH - 1).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; light_client_prover .wait_for_l1_height(start_l1_height + DEFAULT_FINALITY_DEPTH, None) @@ -1973,7 +1929,7 @@ impl TestCase for UnchainedBatchProofsTest { assert_eq!(lcp_output.last_sequencer_commitment_index, U32::from(1)); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp4), 1) + .send_transaction(DaTxRequest::ZKProof(bp4)) .await .unwrap(); @@ -2003,7 +1959,7 @@ impl TestCase for UnchainedBatchProofsTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_unchained_batch_proofs_in_light_client() -> Result<()> { TestCaseRunner::new(UnchainedBatchProofsTest { task_manager: TaskManager::current(), @@ -2071,10 +2027,9 @@ impl TestCase for UnknownL1HashBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -2117,7 +2072,7 @@ impl TestCase for UnknownL1HashBatchProofTest { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1) + .send_transaction(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2149,7 +2104,7 @@ impl TestCase for UnknownL1HashBatchProofTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_unknown_l1_hash_batch_proof_in_light_client() -> Result<()> { TestCaseRunner::new(UnknownL1HashBatchProofTest { task_manager: TaskManager::current(), @@ -2220,10 +2175,9 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -2234,10 +2188,9 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -2248,10 +2201,9 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment3.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment3.clone(), + )) .await .unwrap(); @@ -2304,7 +2256,7 @@ impl TestCase for ChainProofByCommitmentIndex { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1) + .send_transaction(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2323,7 +2275,7 @@ impl TestCase for ChainProofByCommitmentIndex { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1) + .send_transaction(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2361,7 +2313,7 @@ impl TestCase for ChainProofByCommitmentIndex { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_chain_proof_by_commitment_index() -> Result<()> { TestCaseRunner::new(ChainProofByCommitmentIndex { task_manager: TaskManager::current(), @@ -2468,7 
+2420,7 @@ impl TestCase for ProofWithMissingCommitment { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1) + .send_transaction(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2506,7 +2458,7 @@ impl TestCase for ProofWithMissingCommitment { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_proof_with_missing_commitment_is_discarded() -> Result<()> { TestCaseRunner::new(ProofWithMissingCommitment { task_manager: TaskManager::current(), @@ -2588,10 +2540,9 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { }; let _ = malicious_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -2632,25 +2583,14 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { None, ); - let txs = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp1), 1) + batch_prover_bitcoin_da_service + .send_transaction(DaTxRequest::ZKProof(bp1)) .await .unwrap(); da.wait_mempool_len(2, None).await?; - da.generate_block( - da.get_new_address(None, None) - .await? - .assume_checked() - .to_string(), - txs.into_iter() - .flat_map(|tx| [tx[0].id.to_string(), tx[1].id.to_string()]) - .collect(), - ) - .await?; - - da.generate(DEFAULT_FINALITY_DEPTH - 1).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; light_client_prover .wait_for_l1_height(start_l1_height + DEFAULT_FINALITY_DEPTH, None) @@ -2674,10 +2614,9 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send with the correct da service let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -2703,25 +2642,14 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { None, ); - let txs = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp1), 1) + batch_prover_bitcoin_da_service + .send_transaction(DaTxRequest::ZKProof(bp1)) .await .unwrap(); da.wait_mempool_len(2, None).await?; - da.generate_block( - da.get_new_address(None, None) - .await? - .assume_checked() - .to_string(), - txs.into_iter() - .flat_map(|tx| [tx[0].id.to_string(), tx[1].id.to_string()]) - .collect(), - ) - .await?; - - da.generate(DEFAULT_FINALITY_DEPTH - 1).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; light_client_prover .wait_for_l1_height(finalized_height + DEFAULT_FINALITY_DEPTH, None) @@ -2752,10 +2680,9 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send with the correct da service let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -2781,25 +2708,14 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); - let txs = malicious_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp2.clone()), 1) + malicious_bitcoin_da_service + .send_transaction(DaTxRequest::ZKProof(bp2.clone())) .await .unwrap(); da.wait_mempool_len(2, None).await?; - da.generate_block( - da.get_new_address(None, None) - .await? 
- .assume_checked() - .to_string(), - txs.into_iter() - .flat_map(|tx| [tx[0].id.to_string(), tx[1].id.to_string()]) - .collect(), - ) - .await?; - - da.generate(DEFAULT_FINALITY_DEPTH - 1).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; light_client_prover .wait_for_l1_height(finalized_height + DEFAULT_FINALITY_DEPTH, None) @@ -2822,25 +2738,14 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { assert_eq!(lcp_output.last_sequencer_commitment_index, U32::from(1)); // Now send batch proof with the correct da pub key and expect it to transition - let txs = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp2.clone()), 1) + batch_prover_bitcoin_da_service + .send_transaction(DaTxRequest::ZKProof(bp2.clone())) .await .unwrap(); da.wait_mempool_len(2, None).await?; - da.generate_block( - da.get_new_address(None, None) - .await? - .assume_checked() - .to_string(), - txs.into_iter() - .flat_map(|tx| [tx[0].id.to_string(), tx[1].id.to_string()]) - .collect(), - ) - .await?; - - da.generate(DEFAULT_FINALITY_DEPTH - 1).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; let finalized_height = da.get_finalized_height(None).await?; light_client_prover @@ -2865,7 +2770,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_proof_and_commitment_with_wrong_da_pubkey() -> Result<()> { TestCaseRunner::new(ProofAndCommitmentWithWrongDaPubkey { task_manager: TaskManager::current(), @@ -2965,10 +2870,9 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -2979,10 +2883,9 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_2.clone()), - 1, - ) + .send_transaction(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_2.clone(), + )) .await .unwrap(); @@ -3002,7 +2905,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { None, ); let _ = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -3041,7 +2944,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { ), ); let _ = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(wrong_prev_hash_batch_proof), 1) + .send_transaction(DaTxRequest::ZKProof(wrong_prev_hash_batch_proof)) .await .unwrap(); @@ -3073,7 +2976,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); let _ = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(correct_prev_hash_proof), 1) + .send_transaction(DaTxRequest::ZKProof(correct_prev_hash_proof)) .await .unwrap(); @@ -3100,7 +3003,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_proof_with_wrong_previous_commitment_hash() -> Result<()> { TestCaseRunner::new(ProofWithWrongPreviousCommitmentHash { task_manager: TaskManager::current(), @@ -3323,6 +3226,8 @@ impl UndecompressableBlobTest { 2, 
bitcoin::Network::Regtest, REVEAL_TX_PREFIX, + vec![], + vec![], )? else { panic!("Wrong DaTxs kind"); diff --git a/bin/citrea/tests/bitcoin/sequencer_commitments.rs b/bin/citrea/tests/bitcoin/sequencer_commitments.rs index acedc0a24e..175e94f96b 100644 --- a/bin/citrea/tests/bitcoin/sequencer_commitments.rs +++ b/bin/citrea/tests/bitcoin/sequencer_commitments.rs @@ -22,6 +22,7 @@ use rs_merkle::MerkleTree; use sov_ledger_rpc::LedgerRpcClient; use sov_rollup_interface::da::{BlobReaderTrait, DaTxRequest, DataOnDa, SequencerCommitment}; use sov_rollup_interface::rpc::SequencerCommitmentResponse; +use sov_rollup_interface::services::da::DaService; use tokio::time::sleep; use super::get_citrea_path; @@ -370,7 +371,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { index: 1, }; da_service - .send_transaction_with_fee_rate(DaTxRequest::SequencerCommitment(commitment), 1) + .send_transaction(DaTxRequest::SequencerCommitment(commitment)) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -383,7 +384,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { index: 2, }; da_service - .send_transaction_with_fee_rate(DaTxRequest::SequencerCommitment(commitment), 1) + .send_transaction(DaTxRequest::SequencerCommitment(commitment)) .await .unwrap(); // Restart sequencer, it should fetch commitment with index 1 and 2 @@ -448,7 +449,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_sequencer_commitments_from_da_layer() -> Result<()> { TestCaseRunner::new(SequencerCommitmentsFromDaTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index ee2afbf8a9..37456cecb8 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -23,6 +23,8 @@ use citrea_e2e::node::{BatchProver, FullNode, NodeKind}; use citrea_e2e::traits::NodeT; use citrea_primitives::{MAX_TX_BODY_SIZE, REVEAL_TX_PREFIX}; use reth_tasks::TaskExecutor; +use sov_db::ledger_db::LedgerDB; +use sov_db::rocks_db_config::RocksdbConfig; use sov_ledger_rpc::LedgerRpcClient; use sov_rollup_interface::da::{ BatchProofMethodId, BatchProofMethodIdBody, DaTxRequest, SequencerCommitment, @@ -190,8 +192,6 @@ pub async fn spawn_bitcoin_da_service( rpc_connect_timeout_secs: None, }; - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - let network = Network::Nightly; let chain_params = RollupParams { reveal_tx_prefix, @@ -227,6 +227,10 @@ pub async fn spawn_bitcoin_da_service( let fee_service = FeeService::new(client.clone(), network, da_config.mempool_space_url.clone()); + let ledger_db_path = test_dir.join("da_ledger_db"); + let rocksdb_config = RocksdbConfig::new(&ledger_db_path, None, None); + let ledger_db = LedgerDB::with_config(&rocksdb_config).unwrap(); + let service = Arc::new( BitcoinService::from_config( &da_config, @@ -237,14 +241,14 @@ pub async fn spawn_bitcoin_da_service( monitoring_service, fee_service, true, - tx, + ledger_db, ) .await .unwrap(), ); task_executor - .spawn_with_graceful_shutdown_signal(|tk| service.clone().run_da_queue(rx, block_rx, tk)); + .spawn_with_graceful_shutdown_signal(|tk| service.clone().run_da_queue(block_rx, tk)); service.monitoring.restore().await.unwrap(); task_executor.spawn_with_graceful_shutdown_signal(|tk| Arc::clone(&service.monitoring).run(tk)); diff --git a/bin/citrea/tests/common/helpers.rs b/bin/citrea/tests/common/helpers.rs index 244761e30b..0a14374e32 100644 --- a/bin/citrea/tests/common/helpers.rs +++ 
b/bin/citrea/tests/common/helpers.rs @@ -145,6 +145,7 @@ pub async fn start_rollup( &rollup_config, sequencer_config.is_some() || rollup_prover_config.is_some(), network.unwrap_or(Network::Nightly), + ledger_db.clone(), ) .await .expect("Dependencies setup should work"); diff --git a/bin/citrea/tests/mock/proving.rs b/bin/citrea/tests/mock/proving.rs index fa0c3c5a06..b5a2e89645 100644 --- a/bin/citrea/tests/mock/proving.rs +++ b/bin/citrea/tests/mock/proving.rs @@ -356,6 +356,7 @@ async fn test_batch_prover_prove_rpcs() { assert_eq!(job_ids.len(), 1); let job_id = job_ids[0]; + println!("111"); let response = wait_for_prover_job(&prover_client, job_id, None) .await .unwrap(); @@ -377,6 +378,7 @@ async fn test_batch_prover_prove_rpcs() { wait_for_l2_block(&test_client, 8, None).await; wait_for_commitment(&da_service, 6, None).await; + println!("3"); // invoke proving from RPC, since paused, should not start any job let job_ids = prover_client.batch_prover_prove(None).await; assert_eq!(job_ids.len(), 0); diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs index c80b51cae6..277444fb5f 100644 --- a/crates/batch-prover/src/prover.rs +++ b/crates/batch-prover/src/prover.rs @@ -735,16 +735,16 @@ where // submit the proof to the DA service in the background tokio::spawn(async move { - let tx_id = prover_service - .submit_proof(proof_with_duration.proof, job_id) + let txid = prover_service + .submit_proof(proof_with_duration.proof) .await .expect("Failed to submit proof"); - info!("Job {} proof sent to DA", job_id); + info!("Job {} proof submitted to DA", job_id); - // stores tx id and removes job from pending da submission + // // stores tx id and removes job from pending da submission ledger_db - .finalize_proving_job(job_id, tx_id.into()) + .finalize_proving_job(job_id, txid.into()) .expect("Should update proving job tx id"); }); } @@ -827,17 +827,17 @@ where info!("Submitting recovered proof for job {}", job_id); // submit in the background tokio::spawn(async move { - let tx_id = prover_service - .submit_proof(proof, job_id) + let id = prover_service + .submit_proof(proof) .await .expect("Failed to submit transaction"); info!("Recovered Job {} proof sent to DA", job_id); - // stores tx id and removes job from pending da submission - ledger_db - .finalize_proving_job(job_id, tx_id.into()) - .expect("Should update proving job tx id"); - info!("Finalized recovered proving job: {}", job_id); + // // stores tx id and removes job from pending da submission + // ledger_db + // .finalize_proving_job(job_id, tx_id.into()) + // .expect("Should update proving job tx id"); + // info!("Finalized recovered proving job: {}", job_id); }); } } diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index 0e54ee13f5..3b67ee1848 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -479,15 +479,22 @@ where let receipt = InnerReceipt::Fake(fake_receipt); let proof = bincode::serialize(&receipt).expect("Receipt serialization cannot fail"); - let tx_id = self + let job_id = self .context .da_service .send_transaction(DaTxRequest::ZKProof(proof.clone())) .await .map_err(internal_rpc_error)?; + let txid = self + .context + .da_service + .wait_for_completion(job_id, None) + .await + .map_err(internal_rpc_error)?; + Ok(BatchProofResponse { - l1_tx_id: Some(tx_id.into()), + l1_tx_id: Some(txid.into()), proof, proof_output: StoredBatchProofOutput::from(output).into(), }) diff --git a/crates/bitcoin-da/Cargo.toml 
b/crates/bitcoin-da/Cargo.toml index 6827c99197..24cded8fd1 100644 --- a/crates/bitcoin-da/Cargo.toml +++ b/crates/bitcoin-da/Cargo.toml @@ -13,11 +13,13 @@ repository = { workspace = true } [dependencies] citrea-common = { path = "../common", optional = true } citrea-primitives = { path = "../primitives" } +sov-db = { path = "../sovereign-sdk/full-node/db/sov-db", optional = true } sov-rollup-interface = { path = "../sovereign-sdk/rollup-interface" } anyhow = { workspace = true } async-trait = { workspace = true, optional = true } backoff = { workspace = true, optional = true } +bincode = { workspace = true, optional = true } bitcoin = { workspace = true } borsh = { workspace = true } crypto-bigint = { workspace = true } @@ -38,6 +40,7 @@ sha2 = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"], optional = true } tracing = { workspace = true, optional = true } +uuid = { workspace = true, optional = true } bitcoincore-rpc = { workspace = true, optional = true } @@ -49,6 +52,7 @@ default = [] native = [ "dep:async-trait", "dep:backoff", + "dep:bincode", "dep:futures", "dep:lru", "dep:tokio", @@ -63,5 +67,7 @@ native = [ "dep:reqwest", "dep:jsonrpsee", "dep:secp256k1", + "dep:sov-db", + "dep:uuid", ] testing = [] diff --git a/crates/bitcoin-da/src/error.rs b/crates/bitcoin-da/src/error.rs index ffd17cfaac..64ab50306b 100644 --- a/crates/bitcoin-da/src/error.rs +++ b/crates/bitcoin-da/src/error.rs @@ -6,6 +6,7 @@ use thiserror::Error; use tokio::task::JoinError; use crate::fee::FeeServiceError; +use crate::job::error::JobServiceError; use crate::monitoring::{MonitorError, TxStatus}; /// The top level error type that can be returned by the `BitcoinService`. @@ -44,9 +45,9 @@ pub enum BitcoinServiceError { /// Cannot bump fee for TX. #[error("Cannot bump fee for TX with status: {0:?}. Transaction must be pending")] WrongStatusForBumping(TxStatus), - /// Tx requested when queue is not empty. - #[error("Cannot create DA transaction while da queue is not empty")] - QueueNotEmpty, + /// Tx requested while the previous job is not fully sent. + #[error("Cannot create DA transaction while other job is in progress")] + PreviousJobInProgress, /// Transaction rejected by mempool. #[error(transparent)] MempoolRejection(#[from] MempoolRejection), @@ -110,6 +111,11 @@ pub enum BitcoinServiceError { /// Fee service operation failure. #[error("Fee service error: {0}")] FeeServiceError(#[from] FeeServiceError), + // #[error(transparent)] + // Other(#[from] anyhow::Error), /// Job service error + #[error("Job service error: {0}")] + JobService(#[from] JobServiceError), } /// Error type for mempool rejections via testmempoolaccept method.
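The error.rs hunk above folds job-level failures into `BitcoinServiceError` through thiserror's `#[from]`, and replaces `QueueNotEmpty` with `PreviousJobInProgress`. Below is a minimal, self-contained sketch of that wrapping pattern; the stand-in items (`ServiceError`, `JobError`, `poll_job`, `send_transaction`) are illustrative only, not the crate's actual API:

use thiserror::Error;

// Job-level failures, analogous to `JobServiceError` in the diff above.
#[derive(Error, Debug)]
enum JobError {
    #[error("Job {0} timed out after {1} seconds")]
    Timeout(String, u64),
}

// Service-level error that absorbs job errors, as `BitcoinServiceError` now does.
#[derive(Error, Debug)]
enum ServiceError {
    #[error("Cannot create DA transaction while other job is in progress")]
    PreviousJobInProgress,
    // `#[from]` derives `From<JobError> for ServiceError`, so `?` on a
    // job-level Result converts automatically at the service boundary.
    #[error("Job service error: {0}")]
    Job(#[from] JobError),
}

fn poll_job() -> Result<(), JobError> {
    Err(JobError::Timeout("job-1".into(), 600))
}

fn send_transaction(busy: bool) -> Result<(), ServiceError> {
    if busy {
        return Err(ServiceError::PreviousJobInProgress);
    }
    poll_job()?; // JobError -> ServiceError via the derived From impl
    Ok(())
}

fn main() {
    match send_transaction(true) {
        Err(ServiceError::PreviousJobInProgress) => println!("retry later"),
        Err(e) => println!("failed: {e}"),
        Ok(()) => println!("sent"),
    }
}

A caller that sees `PreviousJobInProgress` can back off and retry, which matches the one-job-at-a-time constraint the new variant encodes.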
diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs index 8e1e15b7de..6c235c1e40 100644 --- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs +++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs @@ -14,17 +14,21 @@ use bitcoin::secp256k1::{SecretKey, XOnlyPublicKey}; use bitcoin::{Address, Amount, Network, Transaction}; use metrics::histogram; use secp256k1::SECP256K1; -use serde::Serialize; -use sov_rollup_interface::da::DataOnDa; +use serde::{Deserialize, Serialize}; +use sov_rollup_interface::da::{DaTxRequest, DataOnDa}; use tracing::{info, instrument, trace, warn}; use super::{ build_commit_transaction, build_control_block, build_reveal_transaction, build_witness, get_size_reveal, sign_blob_with_private_key, update_witness, TransactionKind, TxWithId, }; +use crate::error::BitcoinServiceError; +use crate::job::service::SentChunks; +use crate::service::split_proof; use crate::spec::utxo::UTXO; use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD}; +#[derive(Debug, Clone, Serialize, Deserialize)] /// These are real blobs we put on DA. pub(crate) enum RawTxData { /// borsh(DataOnDa::Complete(compress(Proof))) @@ -39,6 +43,26 @@ pub(crate) enum RawTxData { SequencerCommitment(Vec), } +impl TryFrom for RawTxData { + type Error = BitcoinServiceError; + + fn try_from(request: DaTxRequest) -> Result { + match request { + DaTxRequest::ZKProof(zkproof) => split_proof(zkproof), + DaTxRequest::SequencerCommitment(comm) => { + let blob = borsh::to_vec(&DataOnDa::SequencerCommitment(comm)) + .expect("SequencerCommitment serialize must not fail"); + Ok(RawTxData::SequencerCommitment(blob)) + } + DaTxRequest::BatchProofMethodId(id) => { + let blob = borsh::to_vec(&DataOnDa::BatchProofMethodId(id)) + .expect("BatchProofMethodId serialize must not fail"); + Ok(RawTxData::BatchProofMethodId(blob)) + } + } + } +} + /// This is a list of txs we need to send to DA #[derive(Serialize, Clone, Debug)] pub enum DaTxs { @@ -76,6 +100,19 @@ pub enum DaTxs { }, } +impl DaTxs { + /// Number of commit/reveal pairs + pub fn count(&self) -> usize { + match self { + // Number of required chunks + 1 for aggregate + DaTxs::Chunked { commit_chunks, .. } => commit_chunks.len() + 1, + DaTxs::Complete { .. } + | DaTxs::BatchProofMethodId { .. } + | DaTxs::SequencerCommitment { .. } => 1, + } + } +} + /// Creates the light client transactions (commit and reveal). /// Based on data type, the number of transactions may vary.
/// In the end, reveal txs will be mined with a nonce to have @@ -84,6 +121,7 @@ pub enum DaTxs { #[instrument(level = "trace", skip_all, err)] pub fn create_inscription_transactions( data: RawTxData, + sent_chunks: SentChunks, da_private_key: SecretKey, prev_utxo: Option, utxos: Vec, @@ -105,8 +143,8 @@ pub fn create_inscription_transactions( network, &reveal_tx_prefix, ), - RawTxData::Chunks(body) => create_inscription_type_1( - body, + RawTxData::Chunks(data) => create_inscription_type_1( + data, &da_private_key, prev_utxo, utxos, @@ -115,6 +153,8 @@ pub fn create_inscription_transactions( reveal_fee_rate, network, &reveal_tx_prefix, + sent_chunks.commit_txs, + sent_chunks.reveal_txs, ), RawTxData::BatchProofMethodId(body) => create_inscription_type_3( body, @@ -326,17 +366,33 @@ pub fn create_inscription_type_1( reveal_fee_rate: u64, network: Network, reveal_tx_prefix: &[u8], + previous_commit_chunks: Vec, + previous_reveal_chunks: Vec, ) -> Result { // Create reveal key let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key); let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair); - let mut commit_chunks: Vec = vec![]; - let mut reveal_chunks: Vec = vec![]; + let current_idx = previous_commit_chunks.len(); + let mut commit_chunks = previous_commit_chunks; + let mut reveal_chunks = previous_reveal_chunks; + + if let Some(reveal_tx) = reveal_chunks.last() { + prev_utxo = Some(UTXO { + tx_id: reveal_tx.compute_txid(), + vout: 0, + script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), + address: None, + amount: reveal_tx.output[0].value.to_sat(), + confirmations: 0, + spendable: true, + solvable: true, + }); + } let start = Instant::now(); - for body in chunks { + for body in chunks.into_iter().skip(current_idx) { let kind = TransactionKind::Chunks; let kind_bytes = kind.to_bytes(); @@ -647,6 +703,7 @@ pub fn create_inscription_type_1( if let Some(root) = merkle_root { info!("Taproot merkle root for inscription - Aggregate: {}", root); } + return Ok(DaTxs::Chunked { commit_chunks, reveal_chunks, diff --git a/crates/bitcoin-da/src/helpers/builders/chunks.rs b/crates/bitcoin-da/src/helpers/builders/chunks.rs new file mode 100644 index 0000000000..28e7a715e6 --- /dev/null +++ b/crates/bitcoin-da/src/helpers/builders/chunks.rs @@ -0,0 +1,385 @@ +use core::result::Result::Ok; +use std::time::Instant; + +use bitcoin::blockdata::opcodes::all::{OP_ENDIF, OP_IF}; +use bitcoin::blockdata::opcodes::OP_FALSE; +use bitcoin::blockdata::script; +use bitcoin::hashes::Hash; +use bitcoin::key::{TapTweak, TweakedPublicKey, UntweakedKeypair}; +use bitcoin::opcodes::all::{OP_CHECKSIGVERIFY, OP_NIP}; +use bitcoin::script::PushBytesBuf; +use bitcoin::secp256k1::{SecretKey, XOnlyPublicKey}; +use bitcoin::{Address, Amount, Network, Transaction}; +use metrics::histogram; +use secp256k1::SECP256K1; +use serde::{Deserialize, Serialize}; +use sov_rollup_interface::da::DataOnDa; +use tracing::{info, instrument, trace, warn}; + +use super::{ + build_commit_transaction, build_control_block, build_reveal_transaction, build_witness, + get_size_reveal, sign_blob_with_private_key, update_witness, TransactionKind, TxWithId, +}; +use crate::spec::utxo::UTXO; +use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD}; + +/// Creates the inscription transactions Type 1 - Chunked +#[allow(clippy::too_many_arguments)] +#[instrument(level = "trace", skip_all, err)] +pub fn create_inscription_type_1( + chunks: Vec>, + da_private_key: &SecretKey, + mut prev_utxo: Option, + mut utxos: 
Vec, + change_address: Address, + commit_fee_rate: u64, + reveal_fee_rate: u64, + network: Network, + reveal_tx_prefix: &[u8], + current_idx: usize, +) -> Result { + // Create reveal key + let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key); + let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair); + + let mut commit_chunks: Vec = vec![]; + let mut reveal_chunks: Vec = vec![]; + + let start = Instant::now(); + + for body in chunks.iter().skip(current_idx) { + let kind = TransactionKind::Chunks; + let kind_bytes = kind.to_bytes(); + + // start creating inscription content + let mut reveal_script_builder = script::Builder::new() + .push_x_only_key(&public_key) + .push_opcode(OP_CHECKSIGVERIFY) + .push_slice(PushBytesBuf::from(kind_bytes)) + .push_opcode(OP_FALSE) + .push_opcode(OP_IF); + // push body in chunks of 520 bytes + for chunk in body.chunks(520) { + reveal_script_builder = reveal_script_builder.push_slice( + PushBytesBuf::try_from(chunk.to_vec()).expect("Cannot push body chunk"), + ); + } + // push end if + let reveal_script_builder = reveal_script_builder.push_opcode(OP_ENDIF); + + // Start loop to find a 'nonce' i.e. random number that makes the reveal tx hash starting with zeros given length + let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X + 'mine_chunk: loop { + if nonce % 1000 == 0 { + trace!(nonce, "Trying to find commit & reveal nonce for chunk"); + if nonce > 16384 { + warn!("Too many iterations finding nonce for chunk"); + } + } + // ownerships are moved to the loop + let mut reveal_script_builder = reveal_script_builder.clone(); + + // push nonce + reveal_script_builder = reveal_script_builder + .push_slice(nonce.to_le_bytes()) + // drop the second item, bc there is a big chance it's 0 (tx kind) and nonce is >= 16 + .push_opcode(OP_NIP); + nonce += 1; + + // finalize reveal script + let reveal_script = reveal_script_builder.into_script(); + + let (control_block, merkle_root, tapscript_hash) = + build_control_block(&reveal_script, public_key, SECP256K1); + + // create commit tx address + let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); + + let reveal_value = REVEAL_OUTPUT_AMOUNT; + let fee = get_size_reveal( + change_address.script_pubkey(), + reveal_value, + &reveal_script, + &control_block, + ) as u64 + * reveal_fee_rate; + let reveal_input_value = fee + reveal_value + REVEAL_OUTPUT_THRESHOLD; + + // build commit tx + let (mut unsigned_commit_tx, leftover_utxos) = build_commit_transaction( + prev_utxo.clone(), + utxos.clone(), + commit_tx_address.clone(), + change_address.clone(), + reveal_input_value, + commit_fee_rate, + )?; + + let output_to_reveal = unsigned_commit_tx.output[0].clone(); + + let mut reveal_tx = build_reveal_transaction( + output_to_reveal.clone(), + unsigned_commit_tx.compute_txid(), + 0, + change_address.clone(), + reveal_value + REVEAL_OUTPUT_THRESHOLD, + reveal_fee_rate, + &reveal_script, + &control_block, + )?; + + build_witness( + &unsigned_commit_tx, + &mut reveal_tx, + tapscript_hash, + reveal_script, + control_block, + &key_pair, + SECP256K1, + ); + + let min_commit_value = Amount::from_sat(fee + reveal_value); + while unsigned_commit_tx.output[0].value >= min_commit_value + && reveal_tx.output[0].value > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) + { + let reveal_wtxid = reveal_tx.compute_wtxid(); + let reveal_hash = reveal_wtxid.as_raw_hash().to_byte_array(); + + // check if first N bytes equal to the given prefix + if 
reveal_hash.starts_with(reveal_tx_prefix) { + // check if inscription locked to the correct address + let recovery_key_pair = key_pair.tap_tweak(SECP256K1, merkle_root); + let (x_only_pub_key, _parity) = + recovery_key_pair.to_inner().x_only_public_key(); + assert_eq!( + Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), + network, + ), + commit_tx_address + ); + + // set prev utxo to last reveal tx[0] to chain txs in order + prev_utxo = Some(UTXO { + tx_id: reveal_tx.compute_txid(), + vout: 0, + script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), + address: None, + amount: reveal_tx.output[0].value.to_sat(), + confirmations: 0, + spendable: true, + solvable: true, + }); + + // Replace utxos with leftovers so we don't use prev utxos in next chunks + utxos = leftover_utxos; + + if unsigned_commit_tx.output.len() > 1 { + utxos.push(UTXO { + tx_id: unsigned_commit_tx.compute_txid(), + vout: 1, + address: None, + // change output at vout 1, so take output[1]'s script_pubkey + script_pubkey: unsigned_commit_tx.output[1] + .script_pubkey + .to_hex_string(), + amount: unsigned_commit_tx.output[1].value.to_sat(), + confirmations: 0, + spendable: true, + solvable: true, + }) + } + + commit_chunks.push(unsigned_commit_tx); + reveal_chunks.push(reveal_tx); + + if let Some(root) = merkle_root { + info!("Taproot merkle root for inscription - Chunked: {}", root); + } + + break 'mine_chunk; + } else { + unsigned_commit_tx.output[0].value -= Amount::ONE_SAT; + unsigned_commit_tx.output[1].value += Amount::ONE_SAT; + reveal_tx.output[0].value -= Amount::ONE_SAT; + reveal_tx.input[0].previous_output.txid = unsigned_commit_tx.compute_txid(); + update_witness( + &unsigned_commit_tx, + &mut reveal_tx, + tapscript_hash, + &key_pair, + SECP256K1, + ); + } + } + } + + let (reveal_tx_ids, reveal_wtx_ids): (Vec<_>, Vec<_>) = reveal_chunks + .iter() + .map(|tx| { + ( + tx.compute_txid().to_byte_array(), + tx.compute_wtxid().to_byte_array(), + ) + }) + .collect(); + + let aggregate = DataOnDa::Aggregate(reveal_tx_ids, reveal_wtx_ids); + + // To sign the list of tx ids we assume they form a contiguous list of bytes + let reveal_body: Vec = + borsh::to_vec(&aggregate).expect("Aggregate serialize must not fail"); + // sign the body for authentication of the sequencer + let (signature, signer_public_key) = sign_blob_with_private_key(&reveal_body, da_private_key); + + let kind = TransactionKind::Aggregate; + let kind_bytes = kind.to_bytes(); + + // start creating inscription content + let mut reveal_script_builder = script::Builder::new() + .push_x_only_key(&public_key) + .push_opcode(OP_CHECKSIGVERIFY) + .push_slice(PushBytesBuf::from(kind_bytes)) + .push_opcode(OP_FALSE) + .push_opcode(OP_IF) + .push_slice(PushBytesBuf::try_from(signature).expect("Cannot push signature")) + .push_slice( + PushBytesBuf::try_from(signer_public_key).expect("Cannot push sequencer public key"), + ); + // push body in chunks of 520 bytes + for chunk in reveal_body.chunks(520) { + reveal_script_builder = reveal_script_builder + .push_slice(PushBytesBuf::try_from(chunk.to_vec()).expect("Cannot push body chunk")); + } + // push end if + reveal_script_builder = reveal_script_builder.push_opcode(OP_ENDIF); + + // This envelope is not finished yet. The random number will be added later + + // Start loop to find a 'nonce' i.e.
random number that makes the reveal tx hash starting with zeros given length + let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X + loop { + if nonce % 1000 == 0 { + trace!(nonce, "Trying to find commit & reveal nonce for aggr"); + if nonce > 16384 { + warn!("Too many iterations finding nonce for aggr"); + } + } + let utxos = utxos.clone(); + let change_address = change_address.clone(); + // ownerships are moved to the loop + let mut reveal_script_builder = reveal_script_builder.clone(); + + // push nonce + reveal_script_builder = reveal_script_builder + .push_slice(nonce.to_le_bytes()) + // drop the second item, bc there is a big chance it's 0 (tx kind) and nonce is >= 16 + .push_opcode(OP_NIP); + nonce += 1; + + // finalize reveal script + let reveal_script = reveal_script_builder.into_script(); + + let (control_block, merkle_root, tapscript_hash) = + build_control_block(&reveal_script, public_key, SECP256K1); + + // create commit tx address + let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); + + let reveal_value = REVEAL_OUTPUT_AMOUNT; + let fee = get_size_reveal( + change_address.script_pubkey(), + reveal_value, + &reveal_script, + &control_block, + ) as u64 + * reveal_fee_rate; + let reveal_input_value = fee + reveal_value + REVEAL_OUTPUT_THRESHOLD; + + // build commit tx + let (mut unsigned_commit_tx, _leftover_utxos) = build_commit_transaction( + prev_utxo.clone(), + utxos, + commit_tx_address.clone(), + change_address.clone(), + reveal_input_value, + commit_fee_rate, + )?; + + let input_to_reveal = unsigned_commit_tx.output[0].clone(); + + let mut reveal_tx = build_reveal_transaction( + input_to_reveal.clone(), + unsigned_commit_tx.compute_txid(), + 0, + change_address, + reveal_value + REVEAL_OUTPUT_THRESHOLD, + reveal_fee_rate, + &reveal_script, + &control_block, + )?; + + build_witness( + &unsigned_commit_tx, + &mut reveal_tx, + tapscript_hash, + reveal_script, + control_block, + &key_pair, + SECP256K1, + ); + + let min_commit_value = Amount::from_sat(fee + reveal_value); + while unsigned_commit_tx.output[0].value >= min_commit_value + && reveal_tx.output[0].value > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) + { + let reveal_wtxid = reveal_tx.compute_wtxid(); + let reveal_hash = reveal_wtxid.as_raw_hash().to_byte_array(); + + // check if first N bytes equal to the given prefix + if reveal_hash.starts_with(reveal_tx_prefix) { + // check if inscription locked to the correct address + let recovery_key_pair = key_pair.tap_tweak(SECP256K1, merkle_root); + let (x_only_pub_key, _parity) = recovery_key_pair.to_inner().x_only_public_key(); + assert_eq!( + Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), + network, + ), + commit_tx_address + ); + + histogram!("chunked_mine_da_transaction").record( + Instant::now() + .saturating_duration_since(start) + .as_secs_f64(), + ); + + if let Some(root) = merkle_root { + info!("Taproot merkle root for inscription - Aggregate: {}", root); + } + return Ok(DaTxs::Chunked { + commit_chunks, + reveal_chunks, + commit: unsigned_commit_tx, + reveal: TxWithId { + id: reveal_tx.compute_txid(), + tx: reveal_tx, + }, + }); + } else { + unsigned_commit_tx.output[0].value -= Amount::ONE_SAT; + unsigned_commit_tx.output[1].value += Amount::ONE_SAT; + reveal_tx.output[0].value -= Amount::ONE_SAT; + reveal_tx.input[0].previous_output.txid = unsigned_commit_tx.compute_txid(); + update_witness( + &unsigned_commit_tx, + &mut reveal_tx, + tapscript_hash, + &key_pair, + SECP256K1, + 
); + } + } + } +} diff --git a/crates/bitcoin-da/src/helpers/builders/mod.rs b/crates/bitcoin-da/src/helpers/builders/mod.rs index 06e0b98e01..6b5c27d586 100644 --- a/crates/bitcoin-da/src/helpers/builders/mod.rs +++ b/crates/bitcoin-da/src/helpers/builders/mod.rs @@ -2,6 +2,7 @@ //! related to commit-reveal pattern for Citrea rollup. pub mod body_builders; + #[cfg(feature = "testing")] pub mod test_utils; diff --git a/crates/bitcoin-da/src/helpers/builders/tests.rs b/crates/bitcoin-da/src/helpers/builders/tests.rs index ad9963b3a9..d908c169c4 100644 --- a/crates/bitcoin-da/src/helpers/builders/tests.rs +++ b/crates/bitcoin-da/src/helpers/builders/tests.rs @@ -11,6 +11,7 @@ use citrea_primitives::compression::{compress_blob, decompress_blob}; use super::body_builders::{DaTxs, RawTxData}; use crate::helpers::builders::sign_blob_with_private_key; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction}; +use crate::job::service::SentChunks; use crate::spec::utxo::UTXO; use crate::REVEAL_OUTPUT_AMOUNT; @@ -509,6 +510,7 @@ fn create_inscription_transactions() { let tx_prefix = &[0u8]; let DaTxs::Complete { commit, reveal } = super::body_builders::create_inscription_transactions( RawTxData::Complete(body.clone()), + SentChunks::default(), da_private_key, None, utxos.clone(), diff --git a/crates/bitcoin-da/src/helpers/mod.rs b/crates/bitcoin-da/src/helpers/mod.rs index b2a4d2bee8..6e58568b66 100644 --- a/crates/bitcoin-da/src/helpers/mod.rs +++ b/crates/bitcoin-da/src/helpers/mod.rs @@ -2,9 +2,11 @@ //! It includes transaction kind definitions, transaction builders, parsers, and Merkle tree utilities. use core::num::NonZero; +use std::time::{SystemTime, UNIX_EPOCH}; use bitcoin::consensus::Encodable; use bitcoin::Transaction; +use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; #[cfg(feature = "native")] @@ -16,7 +18,7 @@ pub mod merkle_tree; pub mod parsers; /// Type represents a typed enum for transaction kind -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] #[repr(u16)] pub(crate) enum TransactionKind { /// This type of transaction includes full body (< 400kb) @@ -68,6 +70,14 @@ impl TransactionKind { } } +/// Return UNIX timestamp in seconds +pub(crate) fn get_timestamp() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Cannot fail because there is always a UNIX epoch") + .as_secs() +} + /// Calculate SHA-256d with the patched sha256 impl. pub fn calculate_double_sha256(input: &[u8]) -> [u8; 32] { let mut hasher = Sha256::default(); diff --git a/crates/bitcoin-da/src/job/error.rs b/crates/bitcoin-da/src/job/error.rs new file mode 100644 index 0000000000..791ab2cd2b --- /dev/null +++ b/crates/bitcoin-da/src/job/error.rs @@ -0,0 +1,35 @@ +use thiserror::Error; + +use crate::job::service::JobId; + +/// Job errors +#[derive(Error, Debug)] +pub enum JobServiceError { + /// Job was not found + #[error("Job not found: {0}")] + JobNotFound(JobId), + + /// Job exceeded the timeout duration + #[error("Job {0} timed out after {1} seconds")] + JobTimeout(JobId, u64), + + /// Job completed in a corrupted state without transactions. 
+ #[error("Job {0} completed but no transactions found")] + NoTransactionsFound(JobId), + + /// Failed to serialize or deserialize job data + #[error("Job serialization failed: {0}")] + SerializationError(#[from] bincode::Error), + + /// Database operation failed + #[error("Database error: {0}")] + DatabaseError(#[from] anyhow::Error), + + /// Job execution failed + #[error("Job {0} failed: {1}")] + JobFailed(JobId, String), + + /// Job was cancelled before completion + #[error("Job {0} was cancelled")] + JobCancelled(JobId), +} diff --git a/crates/bitcoin-da/src/job/mod.rs b/crates/bitcoin-da/src/job/mod.rs new file mode 100644 index 0000000000..6f227d7af3 --- /dev/null +++ b/crates/bitcoin-da/src/job/mod.rs @@ -0,0 +1,12 @@ +//! Job management for Bitcoin DA transactions. +//! +//! This module provides a persistent job queue system. +//! Jobs are stored in the database by uuidv7 and processed chronologically. +//! Supports partial sending of chunked transactions and recovery + +/// Job related error types +pub mod error; +/// TODO: RPC API +pub mod rpc; +/// Core job queue implementation and state management +pub mod service; diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -0,0 +1 @@ + diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs new file mode 100644 index 0000000000..1ebd75bd9b --- /dev/null +++ b/crates/bitcoin-da/src/job/service.rs @@ -0,0 +1,295 @@ +use std::time::{Duration, Instant}; + +use bitcoin::{Transaction, Txid}; +use serde::{Deserialize, Serialize}; +use sov_db::ledger_db::DaLedgerOps; +use tracing::{info, instrument}; +use uuid::Uuid; + +use crate::helpers::builders::body_builders::RawTxData; +use crate::helpers::get_timestamp; +use crate::job::error::JobServiceError; + +/// Unique job id using uuidv7 for ordering by creation time +pub(crate) type JobId = Uuid; + +type Result = std::result::Result; + +/// Job status representing the current state of transaction processing +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum JobStatus { + /// Job is queued and waiting to be processed + Pending, + /// Job is in progress + InProgress, + /// Job completed successfully + Completed, + /// Job was cancelled before completion + Cancelled, + /// Job failed with error + Failed { + /// Error associated to the failure + error: String, + }, +} + +// impl JobStatus { +// pub fn as_u8(&self) -> u8 { +// match self { +// JobStatus::Pending => 0, +// JobStatus::InProgress => 1, +// JobStatus::Completed => 2, +// JobStatus::Cancelled => 3, +// JobStatus::Failed { .. } => 4, +// } +// } +// } + +/// Tracks progress of a job including sent transactions for recovery. +/// +/// This state is persisted to the database and updated as transactions +/// are sent to bitcoin da. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct JobProgress { + /// Job id as uuidv7 + pub job_id: JobId, + /// Current job status + pub status: JobStatus, + /// Partially sent commit/reveal chunks for partial sending and recovery + pub sent_chunks: SentChunks, + /// Last update timestamp + pub last_updated: u64, +} + +impl JobProgress { + fn new(job_id: JobId, last_updated: u64) -> Self { + Self { + job_id, + status: JobStatus::Pending, + sent_chunks: SentChunks::new(), + last_updated, + } + } +} + +/// Track sent chunks for partial sending and recovery +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct SentChunks { + /// Sent commit txs + pub commit_txs: Vec, + /// Sent reveal txs + pub reveal_txs: Vec, +} + +impl SentChunks { + /// Return a default SentChunks with empty vectors + pub fn new() -> Self { + Self::default() + } + + /// Return the number of sent chunks + pub fn count(&self) -> usize { + self.reveal_txs.len() + } + + /// Extend with sent commit and reveal chunks + pub fn extend(&mut self, commits: Vec, reveals: Vec) { + self.commit_txs.extend(commits); + self.reveal_txs.extend(reveals); + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct Job { + /// Job id as uuidv7 + pub id: JobId, + /// Raw job data + pub data: RawTxData, + /// Time of job creation + pub created_at: u64, +} + +impl Job { + pub(crate) fn new(data: RawTxData) -> Self { + Self { + id: Uuid::now_v7(), + data, + created_at: get_timestamp(), + } + } +} + +/// Job service +pub struct DaJobService { + ledger_db: DB, +} + +impl DaJobService { + /// Creates a new DaJobService with ledger_db + pub fn new(ledger_db: DB) -> Self { + Self { ledger_db } + } + + /// Create a new job and save to db + #[instrument(level = "trace", skip(self), ret)] + pub fn submit_job(&self, raw_tx_data: RawTxData) -> Result { + let job = Job::new(raw_tx_data); + let job_id = job.id; + + let progress = JobProgress::new(job_id, job.created_at); + + self.insert_job(&job)?; + self.upsert_progress(&progress)?; + + info!("Job {job_id} submitted and persisted"); + Ok(job) + } + + /// Save a new job to db + #[instrument(level = "trace", skip(self))] + fn insert_job(&self, job: &Job) -> Result<()> { + let value = bincode::serialize(job)?; + self.ledger_db + .insert_job(job.id, value) + .map_err(JobServiceError::DatabaseError) + } + + /// Get a job by id + #[instrument(level = "trace", skip(self), ret)] + pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { + let job = self + .ledger_db + .get_job(job_id) + .map_err(JobServiceError::DatabaseError)? + .map(|v| bincode::deserialize(&v)) + .transpose()?; + Ok(job) + } + + /// Upsert job progress after serialization + #[instrument(level = "trace", skip(self))] + pub(crate) fn upsert_progress(&self, progress: &JobProgress) -> Result<()> { + let value = bincode::serialize(progress)?; + self.ledger_db + .upsert_progress(&progress.job_id, value) + .map_err(JobServiceError::DatabaseError) + } + + /// Retrieve and deserialize job progress by id + #[instrument(level = "trace", skip(self), ret)] + pub(crate) fn get_progress(&self, job_id: &JobId) -> Result> { + let progress = self + .ledger_db + .get_progress(job_id) + .map_err(JobServiceError::DatabaseError)?
+            .map(|v| bincode::deserialize(&v))
+            .transpose()?;
+        Ok(progress)
+    }
+
+    /// Get all job ids from storage
+    /// TODO: Optimize with status indexing to query only Pending and InProgress statuses
+    #[instrument(level = "trace", skip(self), ret)]
+    pub(crate) fn get_all_job_ids(&self) -> Result<Vec<JobId>> {
+        self.ledger_db
+            .all_jobs()
+            .map_err(JobServiceError::DatabaseError)
+    }
+
+    /// Update job status and persist the progress
+    #[instrument(level = "debug", skip(self))]
+    pub fn update_job_status(&self, progress: &mut JobProgress, status: JobStatus) -> Result<()> {
+        progress.status = status;
+        progress.last_updated = get_timestamp();
+
+        self.upsert_progress(progress)?;
+        Ok(())
+    }
+
+    /// Record sent DA transactions, keeping track of sent commit and reveal chunks
+    #[instrument(level = "debug", skip(self))]
+    pub fn record_sent_transactions(
+        &self,
+        progress: &mut JobProgress,
+        commits: Vec<Transaction>,
+        reveals: Vec<Transaction>,
+    ) -> Result<()> {
+        progress.sent_chunks.extend(commits, reveals);
+        progress.status = JobStatus::InProgress;
+        progress.last_updated = get_timestamp();
+
+        self.upsert_progress(progress)?;
+        Ok(())
+    }
+
+    /// Get all pending commit and reveal txids.
+    /// This is required for removing them from the UTXO set and preventing UTXOs from being selected twice
+    #[instrument(level = "trace", skip_all, ret)]
+    pub(crate) fn get_pending_chunks(&self) -> Vec<Txid> {
+        let mut txids = Vec::new();
+
+        if let Ok(all_job_ids) = self.get_all_job_ids() {
+            for job_id in all_job_ids {
+                if let Ok(Some(progress)) = self.get_progress(&job_id) {
+                    if matches!(progress.status, JobStatus::InProgress) {
+                        txids.extend(
+                            progress
+                                .sent_chunks
+                                .commit_txs
+                                .iter()
+                                .map(|tx| tx.compute_txid()),
+                        );
+                        txids.extend(
+                            progress
+                                .sent_chunks
+                                .reveal_txs
+                                .iter()
+                                .map(|tx| tx.compute_txid()),
+                        );
+                    }
+                }
+            }
+        }
+
+        txids
+    }
+
+    /// Wait for job completion and return the transaction ID
+    #[instrument(level = "debug", skip(self, timeout), ret)]
+    pub async fn wait_for_completion(
+        &self,
+        job_id: JobId,
+        timeout: Option<Duration>,
+    ) -> Result<Txid> {
+        let start = Instant::now();
+        let timeout = timeout.unwrap_or(Duration::from_secs(600)); // Defaults to 10min
+
+        loop {
+            if start.elapsed() > timeout {
+                return Err(JobServiceError::JobTimeout(job_id, timeout.as_secs()));
+            }
+
+            let progress = self
+                .get_progress(&job_id)?
+                .ok_or(JobServiceError::JobNotFound(job_id))?;
+
+            match progress.status {
+                JobStatus::Completed => {
+                    if let Some(last_reveal) = progress.sent_chunks.reveal_txs.last() {
+                        return Ok(last_reveal.compute_txid());
+                    }
+                    return Err(JobServiceError::NoTransactionsFound(job_id));
+                }
+                JobStatus::Failed { error, .. } => {
+                    return Err(JobServiceError::JobFailed(job_id, error));
+                }
+                JobStatus::Cancelled => {
+                    return Err(JobServiceError::JobCancelled(job_id));
+                }
+                _ => {
+                    tokio::time::sleep(Duration::from_millis(100)).await;
+                }
+            }
+        }
+    }
+}
diff --git a/crates/bitcoin-da/src/lib.rs b/crates/bitcoin-da/src/lib.rs
index e9c567adf5..0568e63a67 100644
--- a/crates/bitcoin-da/src/lib.rs
+++ b/crates/bitcoin-da/src/lib.rs
@@ -69,6 +69,9 @@ pub mod fee;
 #[cfg(feature = "native")]
 pub mod rpc;
 
+#[cfg(feature = "native")]
+pub mod job;
+
 #[cfg(feature = "testing")]
 pub mod test_utils;
 
diff --git a/crates/bitcoin-da/src/monitoring.rs b/crates/bitcoin-da/src/monitoring.rs
index e4bf75b2ae..826c04962a 100644
--- a/crates/bitcoin-da/src/monitoring.rs
+++ b/crates/bitcoin-da/src/monitoring.rs
@@ -3,7 +3,7 @@
 use std::collections::HashMap;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
-use std::time::{Duration, SystemTime, UNIX_EPOCH};
+use std::time::Duration;
 
 use anyhow::anyhow;
 use bitcoin::address::NetworkUnchecked;
@@ -24,6 +24,7 @@
 use tokio::time::interval;
 use tracing::{debug, error, info, instrument, trace};
 
 use crate::helpers::builders::TxWithId;
+use crate::helpers::get_timestamp;
 use crate::helpers::parsers::parse_relevant_transaction;
 use crate::spec::utxo::UTXO;
 
@@ -32,20 +33,12 @@ type Result<T> = std::result::Result<T, MonitorError>;
 
 const REBROADCAST_EACH_N_BLOCK: u64 = 1;
 
-/// Return UNIX timestamp in seconds
-fn get_timestamp() -> u64 {
-    SystemTime::now()
-        .duration_since(UNIX_EPOCH)
-        .expect("Cannot fail because there is always a UNIX epoch")
-        .as_secs()
-}
-
 /// Transaction status in the monitoring service.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub enum TxStatus {
-    /// Queued tx, not already broadcasted
-    Queued,
+    /// Pending tx, status not yet determined
+    Pending,
     /// Tx in mempool
     #[serde(rename_all = "camelCase")]
     InMempool {
@@ -126,7 +119,7 @@ impl MonitoredTx {
     /// Return the UTXOs for this transaction if it's not replaced or evicted.
     pub fn to_utxos(&self) -> Option<Vec<UTXO>> {
         let confirmations = match self.status {
-            TxStatus::Queued | TxStatus::InMempool { .. } => 0,
+            TxStatus::InMempool { .. } => 0,
             TxStatus::Confirmed { confirmations, .. }
             | TxStatus::Finalized { confirmations, .. } => confirmations,
             _ => return None,
@@ -174,8 +167,8 @@ impl Default for ChainState {
 #[derive(Error, Debug)]
 pub enum MonitorError {
     /// Already monitored.
-    #[error("Transaction already monitored")]
-    AlreadyMonitored,
+    #[error("Transaction {0} already monitored")]
+    AlreadyMonitored(Txid),
     /// Transaction not found.
     #[error("Transaction not found")]
     TxNotFound,
@@ -485,7 +478,7 @@ impl MonitoringService {
         let mut monitored_txs = self.monitored_txs.write().await;
 
         if monitored_txs.contains_key(&txid) {
-            return Err(MonitorError::AlreadyMonitored);
+            return Err(MonitorError::AlreadyMonitored(txid));
         }
 
         if let Some(prev_tx_id) = prev_txid {
@@ -497,10 +490,14 @@ impl MonitoringService {
         let current_height = self.client.get_block_count().await?;
 
+        let tx_result = self.client.get_transaction(&txid, None).await?;
+
         self.total_size
             .fetch_add(tx.tx.total_size(), Ordering::SeqCst);
 
-        let status = TxStatus::Queued;
+        let status = self
+            .determine_tx_status(&tx_result, &TxStatus::Pending)
+            .await?;
         let monitored_tx = MonitoredTx {
             tx: tx.tx,
             txid,
@@ -649,7 +646,7 @@ impl MonitoringService {
         for (txid, monitored_tx) in txs.iter_mut() {
             match &monitored_tx.status {
                 // Check non-finalized TXs
-                TxStatus::Queued | TxStatus::Confirmed { ..
} | TxStatus::Replaced { .. } => { + TxStatus::Confirmed { .. } | TxStatus::Replaced { .. } => { if let Ok(tx_result) = self.client.get_transaction(txid, None).await { let new_status = self .determine_tx_status(&tx_result, &monitored_tx.status) @@ -738,7 +735,7 @@ impl MonitoringService { // Tx not found in mempool Err(_) => match current_status { // If transaction is queued or evicted, keep status as is - TxStatus::Queued | TxStatus::Evicted { .. } => current_status.clone(), + TxStatus::Evicted { .. } => current_status.clone(), // If transaction was previously in mempool or confirmed, re-org happened and it got evicted from mempool _ => { tracing::info!("Tx {} was evicted from mempool.", tx_result.info.txid); diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index ddc718f62d..e119129c09 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -6,11 +6,10 @@ use core::result::Result::Ok; use core::str::FromStr; use core::time::Duration; -use std::collections::{HashMap, VecDeque}; +use std::collections::HashMap; use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; -use std::time::Instant; use anyhow::anyhow; use async_trait::async_trait; @@ -29,13 +28,13 @@ use citrea_primitives::{MAX_COMPRESSED_BLOB_SIZE, MAX_TX_BODY_SIZE}; use lru::LruCache; use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; +use sov_db::ledger_db::LedgerDB; use sov_rollup_interface::da::{DaSpec, DaTxRequest, DataOnDa, SequencerCommitment}; -use sov_rollup_interface::services::da::{DaService, TxRequestWithNotifier}; +use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::zk::Proof; use sov_rollup_interface::Network; use tokio::select; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; -use tokio::sync::oneshot::channel as oneshot_channel; +use tokio::sync::mpsc::UnboundedReceiver; use tokio::sync::Mutex; use tracing::{debug, error, info, instrument, trace, warn}; @@ -47,6 +46,7 @@ use crate::helpers::builders::TxWithId; use crate::helpers::merkle_tree::BitcoinMerkleTree; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed}; use crate::helpers::{merkle_tree, TransactionKind}; +use crate::job::service::{DaJobService, Job, JobId, JobProgress, JobStatus, SentChunks}; use crate::metrics::BITCOIN_DA_METRICS as BM; use crate::monitoring::{MonitoredTxKind, MonitoringConfig, MonitoringService, TxStatus}; use crate::network_constants::NetworkConstants; @@ -154,22 +154,22 @@ impl citrea_common::FromEnv for BitcoinServiceConfig { } /// A service that provides data and data availability proofs for Bitcoin -#[derive(Debug)] pub struct BitcoinService { client: Arc, pub(crate) network: bitcoin::Network, network_constants: NetworkConstants, pub(crate) da_private_key: Option, pub(crate) reveal_tx_prefix: Vec, - inscribes_queue: UnboundedSender>, pub(crate) tx_backup_dir: PathBuf, /// Monitoring service for tracking transaction status. 
pub monitoring: Arc, fee: FeeService, l1_block_hash_to_height: Arc>>, - tx_queue: Arc>>, pub(crate) tx_signer: TxSigner, utxo_selection_mode: UtxoSelectionMode, + + // Persistent job queue + job_service: DaJobService, } impl BitcoinService { @@ -180,11 +180,11 @@ impl BitcoinService { network_constants: NetworkConstants, monitoring: Arc, fee: FeeService, - inscribes_queue: UnboundedSender>, da_private_key: Option, reveal_tx_prefix: Vec, tx_backup_dir: PathBuf, utxo_selection_mode: UtxoSelectionMode, + job_service: DaJobService, ) -> Self { Self { tx_signer: TxSigner::new(client.clone()), @@ -193,15 +193,14 @@ impl BitcoinService { network, da_private_key, reveal_tx_prefix, - inscribes_queue, tx_backup_dir, monitoring, fee, l1_block_hash_to_height: Arc::new(Mutex::new(LruCache::new( NonZeroUsize::new(100).unwrap(), ))), - tx_queue: Arc::new(Mutex::new(VecDeque::new())), utxo_selection_mode, + job_service, } } @@ -216,7 +215,7 @@ impl BitcoinService { monitoring: Arc, fee_service: FeeService, require_wallet_check: bool, - inscribes_queue: UnboundedSender>, + ledger_db: LedgerDB, ) -> Result { if require_wallet_check && client @@ -242,17 +241,19 @@ impl BitcoinService { .map_err(|_| BitcoinServiceError::InvalidPrivateKey)?; let utxo_selection_mode = config.utxo_selection_mode.clone().unwrap_or_default(); + + let job_service = DaJobService::new(ledger_db); Ok(Self::new( client, network, network_constants, monitoring, fee_service, - inscribes_queue, da_private_key, chain_params.reveal_tx_prefix, tx_backup_dir.to_path_buf(), utxo_selection_mode, + job_service, )) } @@ -260,13 +261,10 @@ impl BitcoinService { #[instrument(name = "BitcoinDA", skip_all)] pub async fn run_da_queue( self: Arc, - mut rx: UnboundedReceiver>, mut new_block_rx: UnboundedReceiver, mut shutdown: GracefulShutdown, ) { trace!("BitcoinDA queue is initialized. Waiting for the first request..."); - let mut fee_rate_multiplier = self.fee.base_fee_rate_multiplier(); - loop { select! { biased; @@ -277,86 +275,128 @@ impl BitcoinService { new_height_opt = new_block_rx.recv() => { if let Some(new_height) = new_height_opt { trace!("New da block height {new_height}. Processing transaction queue."); - if let Err(e) = self.process_transaction_queue().await { + + if let Err(e) = self.process_job_service().await { error!(?e, "Error processing queue on new block"); } } } - request_opt = rx.recv() => { - if let Some(request) = request_opt { - trace!("A new request is received"); - - loop { - // Build and queue tx with retries: - let fee_sat_per_vbyte = match self.fee.get_fee_rate().await { - Ok(rate) => (rate as f64 * fee_rate_multiplier).ceil() as u64, - Err(e) => { - error!(?e, "Failed to call get_fee_rate. 
Retrying..."); - tokio::time::sleep(Duration::from_secs(1)).await; - continue; - } - }; - match self - .send_transaction_with_fee_rate( - request.tx_request.clone(), - fee_sat_per_vbyte, - ) - .await - { - Ok(txs) => { - let txid = txs.last().unwrap()[1].id; - let tx_id = TxidWrapper(txid); - info!(%txid, "Sent tx to BitcoinDA"); - let _ = request.notify.send(Ok(tx_id)); - - fee_rate_multiplier = self.fee.base_fee_rate_multiplier(); - } - Err(e) => { - error!(?e, "Failed to send transaction to DA layer"); - tokio::time::sleep(Duration::from_secs(1)).await; - - match e { - BitcoinServiceError::MempoolRejection(MempoolRejection::MinRelayFeeNotMet) | BitcoinServiceError::FeeCalculation(_) => { - fee_rate_multiplier = self.fee.get_next_fee_rate_multiplier(fee_rate_multiplier); - }, - BitcoinServiceError::QueueNotEmpty => { - let _ = self.process_transaction_queue().await; - }, - _ => {} - } - - continue; - } - } - break; + } + } + } + + // Process job queue + async fn process_job_service(&self) -> Result<()> { + let fee_rate_multiplier = self.fee.base_fee_rate_multiplier(); + + let fee_sat_per_vbyte = loop { + match self.fee.get_fee_rate().await { + Ok(rate) => { + break (rate as f64 * fee_rate_multiplier).ceil() as u64; + } + Err(e) => { + error!(?e, "Failed to call get_fee_rate. Retrying..."); + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + }; + + // Get all pending/in-progress jobs + let all_job_ids = self.job_service.get_all_job_ids()?; + let mut jobs_to_process = Vec::new(); + + for job_id in all_job_ids { + if let Some(progress) = self.job_service.get_progress(&job_id)? { + match progress.status { + JobStatus::Pending | JobStatus::InProgress => { + if let Some(job) = self.job_service.get_job(&job_id)? { + jobs_to_process.push((job, progress)); } } + _ => {} // Skip completed/cancelled/failed } } } + + let mut previous_job_was_partially_sent = false; + + for (job, mut progress) in jobs_to_process { + info!("Processing job {}", job.id); + + match self + .process_job( + &job, + &mut progress, + fee_sat_per_vbyte, + previous_job_was_partially_sent, + ) + .await + { + Ok(completed) => { + if completed { + info!("Job {} completed successfully", job.id); + previous_job_was_partially_sent = false; + } else { + info!("Job {} partially sent", job.id); + previous_job_was_partially_sent = true; + } + } + Err(e) => { + error!("Error processing job {}: {:?}", job.id, e); + previous_job_was_partially_sent = true; + self.job_service.update_job_status( + &mut progress, + JobStatus::Failed { + error: e.to_string(), + }, + )?; + } + } + } + + Ok(()) } - /// Queue and try sending transaction to DA - pub async fn send_transaction_with_fee_rate( + async fn process_job( &self, - tx_request: DaTxRequest, + job: &Job, + progress: &mut JobProgress, fee_sat_per_vbyte: u64, - ) -> Result> { - let now = Instant::now(); + previous_job_was_partially_sent: bool, + ) -> Result { + info!( + "Processing job {} with status {:?}", + job.id, progress.status + ); + + if matches!(progress.status, JobStatus::Pending) { + self.job_service + .update_job_status(progress, JobStatus::InProgress)?; + } + + let prev_utxo = self + .select_prev_utxo(previous_job_was_partially_sent) + .await?; - let prev_utxo = self.select_prev_utxo().await?; // get all available utxos let utxos = self.get_utxos().await?; + let current_idx = progress.sent_chunks.count(); + let da_txs = self .create_da_transactions_with_fee_rate( - tx_request, fee_sat_per_vbyte, utxos.clone(), prev_utxo.clone(), + job.data.clone(), + 
progress.sent_chunks.clone(), ) .await?; - let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?; + + let signed_txs = self + .tx_signer + .sign_da_txs(da_txs.clone(), current_idx) + .await?; // Test whether signed_txs should be accepted in queue if !self.test_mempool_accept_queue_tx(&signed_txs).await? { @@ -368,43 +408,84 @@ impl BitcoinService { // backup to file after mempool acceptance backup_txs_to_file(&self.tx_backup_dir, &signed_txs)?; - let txs = signed_txs - .iter() - .map(|tx| tx.clone().into_txs_with_id()) - .collect::>(); - self.monitoring - .monitor_transaction_chain(txs.clone()) - .await?; + let mut txids = Vec::new(); + let mut commits_sent = Vec::new(); + let mut reveals_sent = Vec::new(); + let mut sent_count = 0; + + for signed_tx in &signed_txs { + // Test mempool acceptance for this specific transaction + if let Err(e) = self.test_mempool_accept(&signed_tx.as_raw_txs()).await { + debug!(?e, "Transaction rejected by mempool, stopping batch"); + break; + } - // Queue transactions - self.queue_transactions(signed_txs).await; + match self.send_signed_transaction(signed_tx).await { + Ok(ids) => { + sent_count += 1; + txids.extend(ids); + commits_sent.push(signed_tx.commit.tx.clone()); + reveals_sent.push(signed_tx.reveal.tx.clone()); + + self.job_service.record_sent_transactions( + progress, + vec![signed_tx.commit.tx.clone()], + vec![signed_tx.reveal.tx.clone()], + )?; + + let txs = signed_tx.clone().into_txs_with_id(); + self.monitoring.monitor_transaction_chain(vec![txs]).await?; + } + Err(e) => { + error!(?e, "Error sending signed transaction"); + break; + } + } + } - // Process transaction queue. - self.process_transaction_queue().await?; + if let Err(e) = self.monitoring.update_txs_status(&txids).await { + error!(?e, "Failed to update queued tx status"); + } + + let total_needed = da_txs.count(); + let total_sent = current_idx + sent_count; + let completed = total_sent >= total_needed; - BM.transaction_queue_processing_time - .record(Instant::now().saturating_duration_since(now).as_secs_f64()); + if completed { + // Mark job as completed + self.job_service + .update_job_status(progress, JobStatus::Completed)?; - Ok(txs) + info!("Job {} marked as completed", job.id); + } else if sent_count > 0 { + // Job partially sent + info!( + "Job {} progress recorded: {}/{} transactions sent", + job.id, total_sent, total_needed + ); + } + + Ok(completed) } - async fn select_prev_utxo(&self) -> Result> { + async fn select_prev_utxo(&self, should_select_new_utxo: bool) -> Result> { let prev_utxo = self.get_prev_utxo().await; - if self.tx_queue.lock().await.is_empty() { + if !should_select_new_utxo { return Ok(prev_utxo); } match self.utxo_selection_mode { UtxoSelectionMode::Chained => { // Prevent UTXO conflicts when queue is not empty and running UtxoSelectionMode::Chained mode - Err(BitcoinServiceError::QueueNotEmpty) + Err(BitcoinServiceError::PreviousJobInProgress) } - UtxoSelectionMode::Oldest => Ok(if prev_utxo.is_some() { - // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain - prev_utxo - } else { + UtxoSelectionMode::Oldest => Ok(if should_select_new_utxo { // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. + self.get_highest_confirmation_utxo().await? 
+ } else { + // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain + prev_utxo }), } } @@ -451,19 +532,7 @@ impl BitcoinService { // To make sure there are no conflicts between parallel utxos chain, // this additional filters out any UTXO used by queued txs and any change UTXO that are not finalized UtxoSelectionMode::Oldest => { - let txids = self - .tx_queue - .lock() - .await - .iter() - .flat_map(|tx| { - tx.commit - .tx - .input - .iter() - .map(|input| input.previous_output.txid) - }) - .collect::>(); + let txids = self.job_service.get_pending_chunks(); utxos.into_iter().filter(|utxo| { utxo.spendable @@ -510,25 +579,12 @@ impl BitcoinService { #[instrument(level = "trace", fields(prev_utxo), ret, err, skip(self))] async fn create_da_transactions_with_fee_rate( &self, - tx_request: DaTxRequest, fee_sat_per_vbyte: u64, utxos: Vec, prev_utxo: Option, + data: RawTxData, + sent_chunks: SentChunks, ) -> Result { - let data = match tx_request { - DaTxRequest::ZKProof(zkproof) => split_proof(zkproof)?, - DaTxRequest::SequencerCommitment(comm) => { - let data = DataOnDa::SequencerCommitment(comm); - let blob = borsh::to_vec(&data).expect("DataOnDa serialize must not fail"); - RawTxData::SequencerCommitment(blob) - } - DaTxRequest::BatchProofMethodId(method_id) => { - let data = DataOnDa::BatchProofMethodId(method_id); - let blob = borsh::to_vec(&data).expect("DataOnDa serialize must not fail"); - RawTxData::BatchProofMethodId(blob) - } - }; - let network = self.network; let da_private_key = self.da_private_key.expect("No private key set"); // get address from a utxo @@ -544,6 +600,7 @@ impl BitcoinService { // to release the tokio runtime execution create_inscription_transactions( data, + sent_chunks, da_private_key, prev_utxo, utxos, @@ -558,96 +615,96 @@ impl BitcoinService { .map_err(|e| BitcoinServiceError::TransactionBuilderError(e.to_string())) } - async fn queue_transactions(&self, txs: Vec) { - let txs_len = txs.len(); - self.tx_queue.lock().await.extend(txs); - BM.transaction_queue_size.increment(txs_len as f64); - } - - pub(crate) async fn process_transaction_queue(&self) -> Result> { - match self.utxo_selection_mode { - UtxoSelectionMode::Chained => self.process_transaction_queue_chained().await, - UtxoSelectionMode::Oldest => self.process_transaction_queue_oldest_mode().await, - } - } - - pub(crate) async fn process_transaction_queue_oldest_mode(&self) -> Result> { - let mut queue = self.tx_queue.lock().await; - - let mut txids = Vec::new(); - let mut failed_txs = VecDeque::new(); - while let Some(tx) = queue.pop_front() { - info!( - "Processing transaction from queue. Commit: {} Reveal: {}", - tx.commit_txid(), - tx.reveal_txid() - ); - if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await { - debug!(?e, "Rejected by mempool"); - failed_txs.push_back(tx); - continue; - } - - match self.send_signed_transaction(&tx).await { - Ok(ids) => { - BM.transaction_queue_size.decrement(1); - txids.extend(ids) - } - Err(e) => { - error!(?e, "Error sending signed transaction"); - failed_txs.push_back(tx); - } - } - } - - *queue = failed_txs; - - // Update monitored tx status - if let Err(e) = self.monitoring.update_txs_status(&txids).await { - error!(?e, "Failed to update queued tx status"); - } - - Ok(txids) - } - - /// Send transaction out of the queue to DA until the first error. - /// Returns the successfully sent txs. 
- pub(crate) async fn process_transaction_queue_chained(&self) -> Result> { - let mut queue = self.tx_queue.lock().await; - - let mut txids = Vec::new(); - while let Some(tx) = queue.front() { - info!( - "Processing transaction from queue. Commit: {} Reveal: {}", - tx.commit_txid(), - tx.reveal_txid() - ); - if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await { - warn!(?e, "Rejected by mempool"); - break; - } - - match self.send_signed_transaction(tx).await { - Ok(ids) => { - queue.pop_front(); - BM.transaction_queue_size.decrement(1); - txids.extend(ids) - } - Err(e) => { - error!(?e, "Error sending signed transaction"); - // Break on first error and return successfully sent txids - break; - } - } - } - - // Update monitored tx status - if let Err(e) = self.monitoring.update_txs_status(&txids).await { - error!(?e, "Failed to update queued tx status"); - } - - Ok(txids) - } + // async fn queue_transactions(&self, txs: Vec) { + // let txs_len = txs.len(); + // self.tx_queue.lock().await.extend(txs); + // BM.transaction_queue_size.increment(txs_len as f64); + // } + + // pub(crate) async fn process_transaction_queue(&self) -> Result> { + // match self.utxo_selection_mode { + // UtxoSelectionMode::Chained => self.process_transaction_queue_chained().await, + // UtxoSelectionMode::Oldest => self.process_transaction_queue_oldest_mode().await, + // } + // } + + // pub(crate) async fn process_transaction_queue_oldest_mode(&self) -> Result> { + // let mut queue = self.tx_queue.lock().await; + + // let mut txids = Vec::new(); + // let mut failed_txs = VecDeque::new(); + // while let Some(tx) = queue.pop_front() { + // info!( + // "Processing transaction from queue. Commit: {} Reveal: {}", + // tx.commit_txid(), + // tx.reveal_txid() + // ); + // if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await { + // debug!(?e, "Rejected by mempool"); + // failed_txs.push_back(tx); + // continue; + // } + + // match self.send_signed_transaction(&tx).await { + // Ok(ids) => { + // BM.transaction_queue_size.decrement(1); + // txids.extend(ids) + // } + // Err(e) => { + // error!(?e, "Error sending signed transaction"); + // failed_txs.push_back(tx); + // } + // } + // } + + // *queue = failed_txs; + + // // Update monitored tx status + // if let Err(e) = self.monitoring.update_txs_status(&txids).await { + // error!(?e, "Failed to update queued tx status"); + // } + + // Ok(txids) + // } + + // /// Send transaction out of the queue to DA until the first error. + // /// Returns the successfully sent txs. + // pub(crate) async fn process_transaction_queue_chained(&self) -> Result> { + // let mut queue = self.tx_queue.lock().await; + + // let mut txids = Vec::new(); + // while let Some(tx) = queue.front() { + // info!( + // "Processing transaction from queue. 
Commit: {} Reveal: {}", + // tx.commit_txid(), + // tx.reveal_txid() + // ); + // if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await { + // warn!(?e, "Rejected by mempool"); + // break; + // } + + // match self.send_signed_transaction(tx).await { + // Ok(ids) => { + // queue.pop_front(); + // BM.transaction_queue_size.decrement(1); + // txids.extend(ids) + // } + // Err(e) => { + // error!(?e, "Error sending signed transaction"); + // // Break on first error and return successfully sent txids + // break; + // } + // } + // } + + // // Update monitored tx status + // if let Err(e) = self.monitoring.update_txs_status(&txids).await { + // error!(?e, "Failed to update queued tx status"); + // } + + // Ok(txids) + // } pub(crate) async fn send_signed_transaction(&self, tx: &SignedTxPair) -> Result> { let raw_txs = tx.as_raw_txs(); @@ -1320,26 +1377,37 @@ impl DaService for BitcoinService { (relevant_txs, inclusion_proof, completeness_proof) } - #[instrument(level = "trace", skip_all)] - async fn send_transaction( - &self, - tx_request: DaTxRequest, - ) -> Result<::TransactionId> { - let queue = self.get_send_transaction_queue(); - let (tx, rx) = oneshot_channel(); - queue - .send(TxRequestWithNotifier { - tx_request, - notify: tx, - }) - .map_err(|_| BitcoinServiceError::ChannelSendError)?; - Ok(rx.await?.expect("Queue never sends error")) + /// Submit a new job to the queue + async fn send_transaction(&self, tx_request: DaTxRequest) -> Result { + // TODO handle chaining job request + if self.utxo_selection_mode == UtxoSelectionMode::Chained { + let all_job_ids = self.job_service.get_all_job_ids()?; + for job_id in all_job_ids { + if let Some(progress) = self.job_service.get_progress(&job_id)? { + if matches!(progress.status, JobStatus::Pending | JobStatus::InProgress) { + return Err(BitcoinServiceError::PreviousJobInProgress); + } + } + } + } + + let job = self.job_service.submit_job(tx_request.try_into()?)?; + + self.process_job_service().await?; + + Ok(job.id) } - fn get_send_transaction_queue( + async fn wait_for_completion( &self, - ) -> UnboundedSender> { - self.inscribes_queue.clone() + job_id: JobId, + timeout: Option, + ) -> Result { + Ok(self + .job_service + .wait_for_completion(job_id, timeout) + .await + .map(TxidWrapper)?) } #[instrument(level = "trace", skip(self))] @@ -1473,7 +1541,7 @@ impl DaService for BitcoinService { /// Wrapper around Txid to be used in DaSpec. 
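
Submission is now a two-step, job-based API: send_transaction persists the job and returns its JobId immediately, and wait_for_completion polls the stored progress until the final reveal txid is known. A hedged caller-side sketch (the da_service handle and tx_request value are assumed to be set up as elsewhere in this patch; the 600s cap mirrors the service default):

    // Sketch: submit a DA request, then block up to 10 minutes for the reveal txid.
    let job_id = da_service.send_transaction(tx_request).await?;
    let tx_id = da_service
        .wait_for_completion(job_id, Some(Duration::from_secs(600)))
        .await?;

The Txid newtype below is opened up to pub(crate) precisely so wait_for_completion can wrap the reveal txid: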
#[derive(PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)] -pub struct TxidWrapper(Txid); +pub struct TxidWrapper(pub(crate) Txid); impl From for [u8; 32] { fn from(val: TxidWrapper) -> Self { val.0.to_byte_array() diff --git a/crates/bitcoin-da/src/test_utils.rs b/crates/bitcoin-da/src/test_utils.rs index f74dd3db76..4344e8d4a2 100644 --- a/crates/bitcoin-da/src/test_utils.rs +++ b/crates/bitcoin-da/src/test_utils.rs @@ -76,7 +76,7 @@ impl BitcoinService { } .unwrap(); - let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?; + let signed_txs = self.tx_signer.sign_da_txs(da_txs, 0).await?; reveal_chunks.push((txid, wtxid)); @@ -119,7 +119,7 @@ impl BitcoinService { ) .unwrap(); - let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?; + let signed_txs = self.tx_signer.sign_da_txs(da_txs, 0).await?; txids.extend(self.send_signed_transaction(&signed_txs[0]).await?); } diff --git a/crates/bitcoin-da/src/tx_signer.rs b/crates/bitcoin-da/src/tx_signer.rs index 5accbaa6da..089115901c 100644 --- a/crates/bitcoin-da/src/tx_signer.rs +++ b/crates/bitcoin-da/src/tx_signer.rs @@ -7,6 +7,7 @@ use bitcoin::consensus::encode; use bitcoin::{Transaction, Txid}; use bitcoincore_rpc::json::SignRawTransactionInput; use bitcoincore_rpc::{Client, RpcApi}; +use serde::{Deserialize, Serialize}; use tracing::trace; use crate::error::BitcoinServiceError; @@ -16,7 +17,7 @@ use crate::helpers::TransactionKind; pub(crate) type Result = std::result::Result; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct SignedTxWithId { hex: Vec, pub tx: Transaction, @@ -24,7 +25,7 @@ pub(crate) struct SignedTxWithId { } /// Pair of commit/reveal signed transactions -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct SignedTxPair { pub commit: SignedTxWithId, pub reveal: SignedTxWithId, @@ -70,7 +71,11 @@ impl TxSigner { Self { client } } - pub(crate) async fn sign_da_txs(&self, da_txs: DaTxs) -> Result> { + pub(crate) async fn sign_da_txs( + &self, + da_txs: DaTxs, + current_idx: usize, + ) -> Result> { let queued_txs = match da_txs { DaTxs::Complete { commit, reveal } => { vec![ @@ -104,8 +109,14 @@ impl TxSigner { commit, reveal, } => { - self.sign_chunked_transaction(commit_chunks, reveal_chunks, commit, reveal) - .await? + self.sign_chunked_transaction( + commit_chunks, + reveal_chunks, + commit, + reveal, + current_idx, + ) + .await? } }; @@ -154,6 +165,7 @@ impl TxSigner { reveal_chunks: Vec, commit: Transaction, reveal: TxWithId, + current_idx: usize, ) -> Result> { assert!(!commit_chunks.is_empty(), "Received empty chunks"); assert_eq!( @@ -181,7 +193,11 @@ impl TxSigner { let mut raw_txs = Vec::with_capacity(all_tx_map.len()); - for (commit, reveal) in commit_chunks.into_iter().zip(reveal_chunks) { + for (commit, reveal) in commit_chunks + .into_iter() + .zip(reveal_chunks) + .skip(current_idx) + { let mut inputs = vec![]; for input in commit.input.iter() { diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs index c60386fa82..18b3fafbfc 100644 --- a/crates/prover-services/src/parallel.rs +++ b/crates/prover-services/src/parallel.rs @@ -203,30 +203,31 @@ where } /// Submits the zk proof to the DA service, returning transaction id. 
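
Threading current_idx through the signer is what makes chunked jobs resumable: commit/reveal pairs already recorded in SentChunks are skipped instead of re-signed. A reduced sketch of the pattern (a generic stand-in, not the diff's exact code):

    // Sketch: with `sent` pairs already broadcast, a restart only walks the
    // remainder, mirroring zip(reveal_chunks).skip(current_idx) above.
    fn remaining_pairs<T>(commits: Vec<T>, reveals: Vec<T>, sent: usize) -> Vec<(T, T)> {
        commits.into_iter().zip(reveals).skip(sent).collect()
    }

The prover service's submit_proof below is then reworked onto the same submit-and-wait flow: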
- #[instrument(name = "ParallelProverService", skip_all, fields(job_id = _job_id.to_string()))] + #[instrument(name = "ParallelProverService", skip_all)] pub async fn submit_proof( &self, proof: Proof, - _job_id: Uuid, ) -> anyhow::Result<::TransactionId> { let tx_request = DaTxRequest::ZKProof(proof); info!("Submitting proof to DA service"); - self.da_service + let job_id = self + .da_service .send_transaction(tx_request) .await + .map_err(|e| anyhow::anyhow!(e))?; + + self.da_service + .wait_for_completion(job_id, None) + .await .map_err(|e| anyhow::anyhow!(e)) } // Only used in tests - pub async fn submit_proofs( - &self, - proofs: Vec, - ) -> anyhow::Result::TransactionId, Proof)>> { + pub async fn submit_proofs(&self, proofs: Vec) -> anyhow::Result> { let mut tx_and_proof = Vec::with_capacity(proofs.len()); - let job_id = Uuid::nil(); for proof in proofs { - let tx_id = self.submit_proof(proof.clone(), job_id).await?; - tx_and_proof.push((tx_id, proof)); + self.submit_proof(proof.clone()).await?; + tx_and_proof.push(proof); } Ok(tx_and_proof) } diff --git a/crates/prover-services/tests/prover_tests.rs b/crates/prover-services/tests/prover_tests.rs index fcc8fe3fa7..a3d702b6b8 100644 --- a/crates/prover-services/tests/prover_tests.rs +++ b/crates/prover-services/tests/prover_tests.rs @@ -25,7 +25,7 @@ async fn test_successful_prover_execution() { let header_hash = MockHash::from([0; 32]); // Spawn mock proving in the background - let (id, rx) = start_proof(&prover_service, header_hash).await; + let (_, rx) = start_proof(&prover_service, header_hash).await; // Signal finish to 1st proof assert!(vm.finish_next_proof()); @@ -36,7 +36,7 @@ async fn test_successful_prover_execution() { let hash_from_proof = extract_output_header(&proof.proof); assert_eq!(hash_from_proof, header_hash); - prover_service.submit_proof(proof.proof, id).await.unwrap(); + prover_service.submit_proof(proof.proof).await.unwrap(); } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/sequencer/src/commitment/service.rs b/crates/sequencer/src/commitment/service.rs index f9999fe42b..84f53bbc29 100644 --- a/crates/sequencer/src/commitment/service.rs +++ b/crates/sequencer/src/commitment/service.rs @@ -15,10 +15,10 @@ use sov_db::schema::types::L2BlockNumber; use sov_modules_api::WorkingSet; use sov_prover_storage_manager::ProverStorageManager; use sov_rollup_interface::da::{BlockHeaderTrait, DaTxRequest, SequencerCommitment}; -use sov_rollup_interface::services::da::{DaService, TxRequestWithNotifier}; +use sov_rollup_interface::services::da::DaService; use sov_state::ProverStorage; use tokio::select; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::mpsc; use tracing::{debug, error, info, instrument, warn}; use super::controller::CommitmentController; @@ -220,13 +220,14 @@ where debug!("Sequencer: submitting commitment: {:?}", commitment); let tx_request = DaTxRequest::SequencerCommitment(commitment.clone()); - let (notify, rx) = oneshot::channel(); - let request = TxRequestWithNotifier { tx_request, notify }; - self.da_service - .get_send_transaction_queue() - .send(request) - .map_err(|_| anyhow!("Bitcoin service already stopped!"))?; + let job_id = self + .da_service + .send_transaction(tx_request) + .await + .map_err(|e| anyhow!("Failed to submit job to DA {e}"))?; + + println!("sent job"); info!( "Sent commitment to DA queue. 
L2 range: #{}-{}, index: {}", l2_start.0, l2_end.0, commitment_index, @@ -235,10 +236,13 @@ where let start = Instant::now(); let ledger_db = self.ledger_db.clone(); - let _tx_id = rx + println!("awaiting txid"); + let _txid = self + .da_service + .wait_for_completion(job_id, None) .await - .map_err(|_| anyhow!("DA service is dead!"))? - .map_err(|_| anyhow!("Send transaction cannot fail"))?; + .map_err(|e| anyhow!(e))?; + println!("awaited rx"); SM.send_commitment_execution.record( Instant::now() diff --git a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml index 8f89e5c0b0..2b34b16ea7 100644 --- a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml +++ b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml @@ -22,6 +22,8 @@ tokio = { workspace = true, optional = true } rusqlite = { version = "0.34.0", features = ["bundled"], optional = true } serde_json = { workspace = true, optional = true } tracing = { workspace = true, optional = true, features = ["attributes"]} +uuid = { workspace = true, optional = true } + sov-rollup-interface = { path = "../../rollup-interface" } @@ -35,5 +37,6 @@ native = [ "dep:serde_json", "dep:tokio", "dep:tracing", + "dep:uuid", "sov-rollup-interface/native", ] diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index 2ddefaa84d..6a249c2ee4 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -8,12 +8,12 @@ use sha2::Digest; use sov_rollup_interface::da::{ BlobReaderTrait, BlockHeaderTrait, DaSpec, DaTxRequest, DataOnDa, SequencerCommitment, Time, }; -use sov_rollup_interface::services::da::{DaService, SlotData, TxRequestWithNotifier}; +use sov_rollup_interface::services::da::{DaService, SlotData}; use sov_rollup_interface::zk::Proof; -use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; use tokio::sync::{broadcast, Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use tokio::time; use tracing::instrument::Instrument; +use uuid::Uuid; use crate::db_connector::DbConnector; use crate::types::{MockAddress, MockBlob, MockBlock, MockDaVerifier}; @@ -427,10 +427,7 @@ impl DaService for MockDaService { } #[tracing::instrument(name = "MockDA", level = "debug", skip_all)] - async fn send_transaction( - &self, - tx_request: DaTxRequest, - ) -> Result { + async fn send_transaction(&self, tx_request: DaTxRequest) -> Result { let blob = match tx_request { DaTxRequest::ZKProof(proof) => { tracing::debug!("Adding a zkproof"); @@ -450,21 +447,15 @@ impl DaService for MockDaService { }; let blocks = self.blocks.lock().await; let _ = self.add_blob(&blocks, blob, Default::default())?; - Ok(MockHash([0; 32])) + Ok(Uuid::default()) } - fn get_send_transaction_queue( + async fn wait_for_completion( &self, - ) -> UnboundedSender> { - let (tx, mut rx) = unbounded_channel::>(); - let this = self.clone(); - tokio::spawn(async move { - while let Some(req) = rx.recv().await { - let res = this.send_transaction(req.tx_request).await; - let _ = req.notify.send(res); - } - }); - tx + _job_id: Uuid, + _timeout: Option, + ) -> Result { + Ok(MockHash([0; 32])) } async fn get_fee_rate(&self) -> Result { @@ -583,8 +574,8 @@ mod tests { let block_3_before = da.get_block_at(3).await.unwrap(); // Disabling this check because our modified mock da creates blocks when a transaction is sent - // let result = da.get_block_at(4).await; - // assert!(result.is_err()); + let result = da.get_block_at(4).await; + 
assert!(result.is_err()); let block_1_after = da.get_block_at(1).await.unwrap(); let block_2_after = da.get_block_at(2).await.unwrap(); diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 1fbdf3a880..f93d2f7ed3 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -17,12 +17,13 @@ use crate::rocks_db_config::RocksdbConfig; use crate::schema::tables::TestTableNew; use crate::schema::tables::{ CommitmentIndicesByJobId, CommitmentIndicesByL1, CommitmentMerkleRoots, CommitmentsByNumber, - ExecutedMigrations, JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, L2GenesisStateRoot, - L2RangeByL1Height, L2StatusHeights, LastPrunedBlock, LightClientProofBySlotNumber, MempoolTxs, - PendingBonsaiSessionByJobId, PendingL1SubmissionJobs, PendingProofs, - PendingSequencerCommitments, ProofByJobId, ProverLastScannedSlot, ProverPendingCommitments, - ProverStateDiffs, SequencerCommitmentByIndex, ShortHeaderProofBySlotHash, SlotByHash, - StateDiffByBlockNumber, VerifiedBatchProofsBySlotNumber, LEDGER_TABLES, + DaJobById, DaJobProgressById, ExecutedMigrations, JobIdOfCommitment, L2BlockByHash, + L2BlockByNumber, L2GenesisStateRoot, L2RangeByL1Height, L2StatusHeights, LastPrunedBlock, + LightClientProofBySlotNumber, MempoolTxs, PendingBonsaiSessionByJobId, PendingL1SubmissionJobs, + PendingProofs, PendingSequencerCommitments, ProofByJobId, ProverLastScannedSlot, + ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, + ShortHeaderProofBySlotHash, SlotByHash, StateDiffByBlockNumber, + VerifiedBatchProofsBySlotNumber, LEDGER_TABLES, }; use crate::schema::types::batch_proof::{ StoredBatchProof, StoredBatchProofOutput, StoredVerifiedProof, @@ -970,3 +971,40 @@ impl ForkMigration for LedgerDB { Ok(()) } } + +impl DaLedgerOps for LedgerDB { + fn insert_job(&self, job_id: Uuid, job: Vec) -> anyhow::Result<()> { + let mut batch = SchemaBatch::new(); + batch.put::(&job_id, &job)?; + self.db.write_schemas(batch)?; + Ok(()) + } + + fn get_job(&self, job_id: &Uuid) -> anyhow::Result>> { + self.db.get::(job_id) + } + + fn upsert_progress(&self, job_id: &Uuid, progress: Vec) -> anyhow::Result<()> { + let mut batch = SchemaBatch::new(); + batch.put::(job_id, &progress)?; + self.db.write_schemas(batch)?; + Ok(()) + } + + fn get_progress(&self, job_id: &Uuid) -> anyhow::Result>> { + self.db.get::(job_id) + } + + fn all_jobs(&self) -> anyhow::Result> { + let mut iter = self.db.iter::()?; + iter.seek_to_first(); + + let mut jobs = Vec::new(); + for job in iter { + let (job_id, _) = job?.into_tuple(); + jobs.push(job_id); + } + + Ok(jobs) + } +} diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index fdae92523a..fee140bf33 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -325,6 +325,22 @@ pub trait SequencerLedgerOps: SharedLedgerOps { fn get_mempool_txs(&self) -> anyhow::Result, Vec)>>; } +/// Bitcoin da ledger operations +pub trait DaLedgerOps { + /// Insert a DA job by id + fn insert_job(&self, job_id: Uuid, job: Vec) -> Result<()>; + /// Get a DA job by id + fn get_job(&self, job_id: &Uuid) -> Result>>; + + /// Update a DA job progress by id + fn upsert_progress(&self, job_id: &Uuid, progress: Vec) -> Result<()>; + /// Get a DA job 
progress by id + fn get_progress(&self, job_id: &Uuid) -> Result>>; + + /// Get all DA job ids + fn all_jobs(&self) -> Result>; +} + /// Test ledger operations #[cfg(test)] pub trait TestLedgerOps { diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index 8ee4b62f8a..79f2644fd0 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -43,6 +43,8 @@ pub const STATE_TABLES: &[&str] = &[ /// Note: Please keep the list sorted alphabetically pub const SEQUENCER_LEDGER_TABLES: &[&str] = &[ CommitmentsByNumber::table_name(), + DaJobById::table_name(), + DaJobProgressById::table_name(), ExecutedMigrations::table_name(), L2BlockByHash::table_name(), L2BlockByNumber::table_name(), @@ -93,6 +95,8 @@ pub const FULL_NODE_LEDGER_TABLES: &[&str] = &[ pub const BATCH_PROVER_LEDGER_TABLES: &[&str] = &[ CommitmentIndicesByJobId::table_name(), CommitmentIndicesByL1::table_name(), + DaJobById::table_name(), + DaJobProgressById::table_name(), ExecutedMigrations::table_name(), JobIdOfCommitment::table_name(), L2BlockByHash::table_name(), @@ -142,6 +146,8 @@ pub const LEDGER_TABLES: &[&str] = &[ CommitmentIndicesByL1::table_name(), CommitmentMerkleRoots::table_name(), CommitmentsByNumber::table_name(), + DaJobById::table_name(), + DaJobProgressById::table_name(), ExecutedMigrations::table_name(), JobIdOfCommitment::table_name(), L2BlockByHash::table_name(), @@ -493,6 +499,16 @@ define_table_with_seek_key_codec!( (PendingProofs) (u32, u32) => (Proof, L1Height) ); +define_table_with_seek_key_codec!( + /// Da job by uuid + (DaJobById) Uuid => Vec +); + +define_table_with_seek_key_codec!( + /// Da job progress by uuid + (DaJobProgressById) Uuid => Vec +); + #[cfg(test)] define_table_with_seek_key_codec!( /// Test table old diff --git a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs index 14ccf9ce2b..bfee6f35ea 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs @@ -107,6 +107,7 @@ pub trait RollupBlueprint: Sized + Send + Sync { require_wallet_check: bool, task_manager: TaskExecutor, network: Network, + ledger_db: LedgerDB, ) -> Result, anyhow::Error>; /// Creates instance of [`ProverService`]. diff --git a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs index c61391fca4..20cd3215ea 100644 --- a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs @@ -1,11 +1,14 @@ //! The da module defines traits used by the full node to interact with the DA layer. +#[cfg(feature = "native")] +use std::time::Duration; + use serde::de::DeserializeOwned; use serde::Serialize; #[cfg(feature = "native")] -use tokio::sync::mpsc::UnboundedSender; -#[cfg(feature = "native")] use tokio::sync::oneshot::Sender as OneshotSender; +#[cfg(feature = "native")] +use uuid::Uuid; use crate::da::BlockHeaderTrait; #[cfg(feature = "native")] @@ -104,17 +107,14 @@ pub trait DaService: Send + Sync + 'static { /// Send a transaction directly to the DA layer. /// blob is the serialized and signed transaction. /// Returns nothing if the transaction was successfully sent. 
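
Because both new tables are keyed by UUIDv7 job ids, a plain first-to-last scan already yields jobs oldest-first, which is what all_jobs relies on. A hedged sketch of consuming the trait (error handling elided; bincode decoding of JobProgress as in the job service):

    // Sketch: walk every persisted job in creation order and decode its progress.
    for job_id in ledger_db.all_jobs()? {
        if let Some(raw) = ledger_db.get_progress(&job_id)? {
            let progress: JobProgress = bincode::deserialize(&raw)?;
            // inspect progress.status / progress.sent_chunks here
        }
    }

The DaService trait itself then swaps the send queue for this job-id-based flow: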
- async fn send_transaction( - &self, - tx_request: DaTxRequest, - ) -> Result; + async fn send_transaction(&self, tx_request: DaTxRequest) -> Result; - /// A tx part of the queue to send transactions in order - fn get_send_transaction_queue( + /// Wait for a job to finish + async fn wait_for_completion( &self, - ) -> UnboundedSender> { - unimplemented!() - } + job_id: Uuid, + timeout: Option, + ) -> Result; /// Returns fee rate per byte on DA layer. async fn get_fee_rate(&self) -> Result; From a539c1e701eadab1d364b020dadfb7de385349cc Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 8 Oct 2025 11:27:58 +0100 Subject: [PATCH 11/81] Rm unused file --- .../bitcoin-da/src/helpers/builders/chunks.rs | 385 ------------------ 1 file changed, 385 deletions(-) delete mode 100644 crates/bitcoin-da/src/helpers/builders/chunks.rs diff --git a/crates/bitcoin-da/src/helpers/builders/chunks.rs b/crates/bitcoin-da/src/helpers/builders/chunks.rs deleted file mode 100644 index 28e7a715e6..0000000000 --- a/crates/bitcoin-da/src/helpers/builders/chunks.rs +++ /dev/null @@ -1,385 +0,0 @@ -use core::result::Result::Ok; -use std::time::Instant; - -use bitcoin::blockdata::opcodes::all::{OP_ENDIF, OP_IF}; -use bitcoin::blockdata::opcodes::OP_FALSE; -use bitcoin::blockdata::script; -use bitcoin::hashes::Hash; -use bitcoin::key::{TapTweak, TweakedPublicKey, UntweakedKeypair}; -use bitcoin::opcodes::all::{OP_CHECKSIGVERIFY, OP_NIP}; -use bitcoin::script::PushBytesBuf; -use bitcoin::secp256k1::{SecretKey, XOnlyPublicKey}; -use bitcoin::{Address, Amount, Network, Transaction}; -use metrics::histogram; -use secp256k1::SECP256K1; -use serde::{Deserialize, Serialize}; -use sov_rollup_interface::da::DataOnDa; -use tracing::{info, instrument, trace, warn}; - -use super::{ - build_commit_transaction, build_control_block, build_reveal_transaction, build_witness, - get_size_reveal, sign_blob_with_private_key, update_witness, TransactionKind, TxWithId, -}; -use crate::spec::utxo::UTXO; -use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD}; - -/// Creates the inscription transactions Type 1 - Chunked -#[allow(clippy::too_many_arguments)] -#[instrument(level = "trace", skip_all, err)] -pub fn create_inscription_type_1( - chunks: Vec>, - da_private_key: &SecretKey, - mut prev_utxo: Option, - mut utxos: Vec, - change_address: Address, - commit_fee_rate: u64, - reveal_fee_rate: u64, - network: Network, - reveal_tx_prefix: &[u8], - current_idx: usize, -) -> Result { - // Create reveal key - let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key); - let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair); - - let mut commit_chunks: Vec = vec![]; - let mut reveal_chunks: Vec = vec![]; - - let start = Instant::now(); - - for body in chunks.iter().skip(current_idx) { - let kind = TransactionKind::Chunks; - let kind_bytes = kind.to_bytes(); - - // start creating inscription content - let mut reveal_script_builder = script::Builder::new() - .push_x_only_key(&public_key) - .push_opcode(OP_CHECKSIGVERIFY) - .push_slice(PushBytesBuf::from(kind_bytes)) - .push_opcode(OP_FALSE) - .push_opcode(OP_IF); - // push body in chunks of 520 bytes - for chunk in body.chunks(520) { - reveal_script_builder = reveal_script_builder.push_slice( - PushBytesBuf::try_from(chunk.to_vec()).expect("Cannot push body chunk"), - ); - } - // push end if - let reveal_script_builder = reveal_script_builder.push_opcode(OP_ENDIF); - - // Start loop to find a 'nonce' i.e. 
random number that makes the reveal tx hash starting with zeros given length - let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X - 'mine_chunk: loop { - if nonce % 1000 == 0 { - trace!(nonce, "Trying to find commit & reveal nonce for chunk"); - if nonce > 16384 { - warn!("Too many iterations finding nonce for chunk"); - } - } - // ownerships are moved to the loop - let mut reveal_script_builder = reveal_script_builder.clone(); - - // push nonce - reveal_script_builder = reveal_script_builder - .push_slice(nonce.to_le_bytes()) - // drop the second item, bc there is a big chance it's 0 (tx kind) and nonce is >= 16 - .push_opcode(OP_NIP); - nonce += 1; - - // finalize reveal script - let reveal_script = reveal_script_builder.into_script(); - - let (control_block, merkle_root, tapscript_hash) = - build_control_block(&reveal_script, public_key, SECP256K1); - - // create commit tx address - let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); - - let reveal_value = REVEAL_OUTPUT_AMOUNT; - let fee = get_size_reveal( - change_address.script_pubkey(), - reveal_value, - &reveal_script, - &control_block, - ) as u64 - * reveal_fee_rate; - let reveal_input_value = fee + reveal_value + REVEAL_OUTPUT_THRESHOLD; - - // build commit tx - let (mut unsigned_commit_tx, leftover_utxos) = build_commit_transaction( - prev_utxo.clone(), - utxos.clone(), - commit_tx_address.clone(), - change_address.clone(), - reveal_input_value, - commit_fee_rate, - )?; - - let output_to_reveal = unsigned_commit_tx.output[0].clone(); - - let mut reveal_tx = build_reveal_transaction( - output_to_reveal.clone(), - unsigned_commit_tx.compute_txid(), - 0, - change_address.clone(), - reveal_value + REVEAL_OUTPUT_THRESHOLD, - reveal_fee_rate, - &reveal_script, - &control_block, - )?; - - build_witness( - &unsigned_commit_tx, - &mut reveal_tx, - tapscript_hash, - reveal_script, - control_block, - &key_pair, - SECP256K1, - ); - - let min_commit_value = Amount::from_sat(fee + reveal_value); - while unsigned_commit_tx.output[0].value >= min_commit_value - && reveal_tx.output[0].value > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) - { - let reveal_wtxid = reveal_tx.compute_wtxid(); - let reveal_hash = reveal_wtxid.as_raw_hash().to_byte_array(); - - // check if first N bytes equal to the given prefix - if reveal_hash.starts_with(reveal_tx_prefix) { - // check if inscription locked to the correct address - let recovery_key_pair = key_pair.tap_tweak(SECP256K1, merkle_root); - let (x_only_pub_key, _parity) = - recovery_key_pair.to_inner().x_only_public_key(); - assert_eq!( - Address::p2tr_tweaked( - TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), - network, - ), - commit_tx_address - ); - - // set prev utxo to last reveal tx[0] to chain txs in order - prev_utxo = Some(UTXO { - tx_id: reveal_tx.compute_txid(), - vout: 0, - script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), - address: None, - amount: reveal_tx.output[0].value.to_sat(), - confirmations: 0, - spendable: true, - solvable: true, - }); - - // Replace utxos with leftovers so we don't use prev utxos in next chunks - utxos = leftover_utxos; - - if unsigned_commit_tx.output.len() > 1 { - utxos.push(UTXO { - tx_id: unsigned_commit_tx.compute_txid(), - vout: 1, - address: None, - script_pubkey: unsigned_commit_tx.output[0] - .script_pubkey - .to_hex_string(), - amount: unsigned_commit_tx.output[1].value.to_sat(), - confirmations: 0, - spendable: true, - solvable: true, - }) - } - - 
commit_chunks.push(unsigned_commit_tx); - reveal_chunks.push(reveal_tx); - - if let Some(root) = merkle_root { - info!("Taproot merkle root for inscription - Chunked: {}", root); - } - - break 'mine_chunk; - } else { - unsigned_commit_tx.output[0].value -= Amount::ONE_SAT; - unsigned_commit_tx.output[1].value += Amount::ONE_SAT; - reveal_tx.output[0].value -= Amount::ONE_SAT; - reveal_tx.input[0].previous_output.txid = unsigned_commit_tx.compute_txid(); - update_witness( - &unsigned_commit_tx, - &mut reveal_tx, - tapscript_hash, - &key_pair, - SECP256K1, - ); - } - } - } - } - - let (reveal_tx_ids, reveal_wtx_ids): (Vec<_>, Vec<_>) = reveal_chunks - .iter() - .map(|tx| { - ( - tx.compute_txid().to_byte_array(), - tx.compute_wtxid().to_byte_array(), - ) - }) - .collect(); - - let aggregate = DataOnDa::Aggregate(reveal_tx_ids, reveal_wtx_ids); - - // To sign the list of tx ids we assume they form a contiguous list of bytes - let reveal_body: Vec = - borsh::to_vec(&aggregate).expect("Aggregate serialize must not fail"); - // sign the body for authentication of the sequencer - let (signature, signer_public_key) = sign_blob_with_private_key(&reveal_body, da_private_key); - - let kind = TransactionKind::Aggregate; - let kind_bytes = kind.to_bytes(); - - // start creating inscription content - let mut reveal_script_builder = script::Builder::new() - .push_x_only_key(&public_key) - .push_opcode(OP_CHECKSIGVERIFY) - .push_slice(PushBytesBuf::from(kind_bytes)) - .push_opcode(OP_FALSE) - .push_opcode(OP_IF) - .push_slice(PushBytesBuf::try_from(signature).expect("Cannot push signature")) - .push_slice( - PushBytesBuf::try_from(signer_public_key).expect("Cannot push sequencer public key"), - ); - // push body in chunks of 520 bytes - for chunk in reveal_body.chunks(520) { - reveal_script_builder = reveal_script_builder - .push_slice(PushBytesBuf::try_from(chunk.to_vec()).expect("Cannot push body chunk")); - } - // push end if - reveal_script_builder = reveal_script_builder.push_opcode(OP_ENDIF); - - // This envelope is not finished yet. The random number will be added later - - // Start loop to find a 'nonce' i.e. 
random number that makes the reveal tx hash starting with zeros given length - let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X - loop { - if nonce % 1000 == 0 { - trace!(nonce, "Trying to find commit & reveal nonce for aggr"); - if nonce > 16384 { - warn!("Too many iterations finding nonce for aggr"); - } - } - let utxos = utxos.clone(); - let change_address = change_address.clone(); - // ownerships are moved to the loop - let mut reveal_script_builder = reveal_script_builder.clone(); - - // push nonce - reveal_script_builder = reveal_script_builder - .push_slice(nonce.to_le_bytes()) - // drop the second item, bc there is a big chance it's 0 (tx kind) and nonce is >= 16 - .push_opcode(OP_NIP); - nonce += 1; - - // finalize reveal script - let reveal_script = reveal_script_builder.into_script(); - - let (control_block, merkle_root, tapscript_hash) = - build_control_block(&reveal_script, public_key, SECP256K1); - - // create commit tx address - let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); - - let reveal_value = REVEAL_OUTPUT_AMOUNT; - let fee = get_size_reveal( - change_address.script_pubkey(), - reveal_value, - &reveal_script, - &control_block, - ) as u64 - * reveal_fee_rate; - let reveal_input_value = fee + reveal_value + REVEAL_OUTPUT_THRESHOLD; - - // build commit tx - let (mut unsigned_commit_tx, _leftover_utxos) = build_commit_transaction( - prev_utxo.clone(), - utxos, - commit_tx_address.clone(), - change_address.clone(), - reveal_input_value, - commit_fee_rate, - )?; - - let input_to_reveal = unsigned_commit_tx.output[0].clone(); - - let mut reveal_tx = build_reveal_transaction( - input_to_reveal.clone(), - unsigned_commit_tx.compute_txid(), - 0, - change_address, - reveal_value + REVEAL_OUTPUT_THRESHOLD, - reveal_fee_rate, - &reveal_script, - &control_block, - )?; - - build_witness( - &unsigned_commit_tx, - &mut reveal_tx, - tapscript_hash, - reveal_script, - control_block, - &key_pair, - SECP256K1, - ); - - let min_commit_value = Amount::from_sat(fee + reveal_value); - while unsigned_commit_tx.output[0].value >= min_commit_value - && reveal_tx.output[0].value > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) - { - let reveal_wtxid = reveal_tx.compute_wtxid(); - let reveal_hash = reveal_wtxid.as_raw_hash().to_byte_array(); - - // check if first N bytes equal to the given prefix - if reveal_hash.starts_with(reveal_tx_prefix) { - // check if inscription locked to the correct address - let recovery_key_pair = key_pair.tap_tweak(SECP256K1, merkle_root); - let (x_only_pub_key, _parity) = recovery_key_pair.to_inner().x_only_public_key(); - assert_eq!( - Address::p2tr_tweaked( - TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), - network, - ), - commit_tx_address - ); - - histogram!("chunked_mine_da_transaction").record( - Instant::now() - .saturating_duration_since(start) - .as_secs_f64(), - ); - - if let Some(root) = merkle_root { - info!("Taproot merkle root for inscription - Aggregate: {}", root); - } - return Ok(DaTxs::Chunked { - commit_chunks, - reveal_chunks, - commit: unsigned_commit_tx, - reveal: TxWithId { - id: reveal_tx.compute_txid(), - tx: reveal_tx, - }, - }); - } else { - unsigned_commit_tx.output[0].value -= Amount::ONE_SAT; - unsigned_commit_tx.output[1].value += Amount::ONE_SAT; - reveal_tx.output[0].value -= Amount::ONE_SAT; - reveal_tx.input[0].previous_output.txid = unsigned_commit_tx.compute_txid(); - update_witness( - &unsigned_commit_tx, - &mut reveal_tx, - tapscript_hash, - &key_pair, - SECP256K1, - 
); - } - } - } -} From 3d7f1c3e5a690597f226f0aba9d7a0b5aa35212f Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 8 Oct 2025 11:29:57 +0100 Subject: [PATCH 12/81] Remove dead code --- crates/bitcoin-da/src/service.rs | 91 -------------------------------- 1 file changed, 91 deletions(-) diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index e119129c09..e054816c0e 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -615,97 +615,6 @@ impl BitcoinService { .map_err(|e| BitcoinServiceError::TransactionBuilderError(e.to_string())) } - // async fn queue_transactions(&self, txs: Vec) { - // let txs_len = txs.len(); - // self.tx_queue.lock().await.extend(txs); - // BM.transaction_queue_size.increment(txs_len as f64); - // } - - // pub(crate) async fn process_transaction_queue(&self) -> Result> { - // match self.utxo_selection_mode { - // UtxoSelectionMode::Chained => self.process_transaction_queue_chained().await, - // UtxoSelectionMode::Oldest => self.process_transaction_queue_oldest_mode().await, - // } - // } - - // pub(crate) async fn process_transaction_queue_oldest_mode(&self) -> Result> { - // let mut queue = self.tx_queue.lock().await; - - // let mut txids = Vec::new(); - // let mut failed_txs = VecDeque::new(); - // while let Some(tx) = queue.pop_front() { - // info!( - // "Processing transaction from queue. Commit: {} Reveal: {}", - // tx.commit_txid(), - // tx.reveal_txid() - // ); - // if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await { - // debug!(?e, "Rejected by mempool"); - // failed_txs.push_back(tx); - // continue; - // } - - // match self.send_signed_transaction(&tx).await { - // Ok(ids) => { - // BM.transaction_queue_size.decrement(1); - // txids.extend(ids) - // } - // Err(e) => { - // error!(?e, "Error sending signed transaction"); - // failed_txs.push_back(tx); - // } - // } - // } - - // *queue = failed_txs; - - // // Update monitored tx status - // if let Err(e) = self.monitoring.update_txs_status(&txids).await { - // error!(?e, "Failed to update queued tx status"); - // } - - // Ok(txids) - // } - - // /// Send transaction out of the queue to DA until the first error. - // /// Returns the successfully sent txs. - // pub(crate) async fn process_transaction_queue_chained(&self) -> Result> { - // let mut queue = self.tx_queue.lock().await; - - // let mut txids = Vec::new(); - // while let Some(tx) = queue.front() { - // info!( - // "Processing transaction from queue. 
Commit: {} Reveal: {}",
-    //             tx.commit_txid(),
-    //             tx.reveal_txid()
-    //         );
-    //         if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await {
-    //             warn!(?e, "Rejected by mempool");
-    //             break;
-    //         }
-
-    //         match self.send_signed_transaction(tx).await {
-    //             Ok(ids) => {
-    //                 queue.pop_front();
-    //                 BM.transaction_queue_size.decrement(1);
-    //                 txids.extend(ids)
-    //             }
-    //             Err(e) => {
-    //                 error!(?e, "Error sending signed transaction");
-    //                 // Break on first error and return successfully sent txids
-    //                 break;
-    //             }
-    //         }
-    //     }
-
-    //     // Update monitored tx status
-    //     if let Err(e) = self.monitoring.update_txs_status(&txids).await {
-    //         error!(?e, "Failed to update queued tx status");
-    //     }
-
-    //     Ok(txids)
-    // }
-
     pub(crate) async fn send_signed_transaction(&self, tx: &SignedTxPair) -> Result> {
         let raw_txs = tx.as_raw_txs();
         let raw_txs_size_sum = raw_txs.iter().map(|tx| tx.len()).sum::() as f64;
From 3c9317345df86377a3d61da5524d6052ee1f5e59 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 8 Oct 2025 11:41:14 +0100
Subject: [PATCH 13/81] Fix build

---
 crates/batch-prover/src/prover.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs
index 277444fb5f..1e0845ad15 100644
--- a/crates/batch-prover/src/prover.rs
+++ b/crates/batch-prover/src/prover.rs
@@ -823,11 +823,11 @@ where
         // submit all proofs to da
         for (job_id, proof) in proofs {
             let prover_service = self.prover_service.clone();
-            let ledger_db = self.ledger_db.clone();
+            let _ledger_db = self.ledger_db.clone();
             info!("Submitting recovered proof for job {}", job_id);
             // submit in the background
             tokio::spawn(async move {
-                let id = prover_service
+                let _id = prover_service
                     .submit_proof(proof)
                     .await
                     .expect("Failed to submit transaction");
From d9f33a90d0f35c4b5e6c1cbe8be2526843fdd934 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 8 Oct 2025 12:24:48 +0100
Subject: [PATCH 14/81] Index job ids by status

---
 bin/citrea/tests/bitcoin/light_client_test.rs |  22 ++--
 crates/bitcoin-da/src/job/service.rs          | 102 +++++++++++-------
 crates/bitcoin-da/src/service.rs              |  27 ++---
 .../full-node/db/sov-db/src/ledger_db/mod.rs  |  47 +++++---
 .../db/sov-db/src/ledger_db/traits.rs         |  11 +-
 .../full-node/db/sov-db/src/schema/tables.rs  |   8 ++
 6 files changed, 131 insertions(+), 86 deletions(-)

diff --git a/bin/citrea/tests/bitcoin/light_client_test.rs b/bin/citrea/tests/bitcoin/light_client_test.rs
index 84bf4d0409..9488ebc38e 100644
--- a/bin/citrea/tests/bitcoin/light_client_test.rs
+++ b/bin/citrea/tests/bitcoin/light_client_test.rs
@@ -1092,13 +1092,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest {
         let prehash4 = eip191_hash_message(msg4.as_slice());
         let signatures_with_index = create_valid_signatures(&signers, &prehash4);
         bitcoin_da_service
-            .send_transaction_with_fee_rate(
-                DaTxRequest::BatchProofMethodId(BatchProofMethodId {
-                    body: method_id_body4.clone(),
-                    signatures_with_index,
-                }),
-                1,
-            )
+            .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId {
+                body: method_id_body4.clone(),
+                signatures_with_index,
+            }))
             .await
             .unwrap();
         da.wait_mempool_len(2, None).await?;
@@ -1132,13 +1129,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest {
         signatures_with_index.swap(0, 2);

         bitcoin_da_service
-            .send_transaction_with_fee_rate(
-                DaTxRequest::BatchProofMethodId(BatchProofMethodId {
-                    
body: method_id_body5.clone(), - signatures_with_index, - }), - 1, - ) + .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body5.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 1ebd75bd9b..f23bc09124 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -33,17 +33,18 @@ pub enum JobStatus { }, } -// impl JobStatus { -// pub fn as_u8(&self) -> u8 { -// match self { -// JobStatus::Pending => 0, -// JobStatus::InProgress => 1, -// JobStatus::Completed => 2, -// JobStatus::Cancelled => 3, -// JobStatus::Failed { .. } => 4, -// } -// } -// } +impl JobStatus { + /// u8 representation of `JobStatus` + pub fn as_u8(&self) -> u8 { + match self { + JobStatus::Pending => 0, + JobStatus::InProgress => 1, + JobStatus::Completed => 2, + JobStatus::Cancelled => 3, + JobStatus::Failed { .. } => 4, + } + } +} /// Tracks progress of a job including sent transactions for recovery. /// @@ -140,6 +141,8 @@ impl DaJobService { self.insert_job(&job)?; self.upsert_progress(&progress)?; + self.ledger_db + .insert_job_status_index(progress.status.as_u8(), job_id)?; info!("Job {job_id} submitted and persisted"); Ok(job) @@ -187,22 +190,46 @@ impl DaJobService { Ok(progress) } - /// Get all job ids from storage - /// TODO Optimize with status indexing and query only Pending and InProgress status + /// Get all `Pending` and `InProgress` job ids from storage #[instrument(level = "trace", skip(self), ret)] - pub(crate) fn get_all_job_ids(&self) -> Result> { - self.ledger_db - .all_jobs() - .map_err(JobServiceError::DatabaseError) + pub(crate) fn get_all_active_job_ids(&self) -> Result> { + let mut active_jobs = Vec::new(); + + active_jobs.extend( + self.ledger_db + .get_job_ids_by_status(JobStatus::Pending.as_u8())?, + ); + + active_jobs.extend( + self.ledger_db + .get_job_ids_by_status(JobStatus::InProgress.as_u8())?, + ); + + // Sort uuidv7 chronogically + active_jobs.sort(); + + Ok(active_jobs) } /// Update job status by id #[instrument(level = "debug", skip(self))] pub fn update_job_status(&self, progress: &mut JobProgress, status: JobStatus) -> Result<()> { + let old_status = progress.status.as_u8(); + let new_status = status.as_u8(); + progress.status = status; progress.last_updated = get_timestamp(); self.upsert_progress(progress)?; + + // Update status indexing + if old_status != new_status { + self.ledger_db + .remove_job_status_index(old_status, progress.job_id)?; + self.ledger_db + .insert_job_status_index(new_status, progress.job_id)?; + } + Ok(()) } @@ -225,33 +252,32 @@ impl DaJobService { /// Get all pending commit and reveals txids. 
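Keying the new index by (status, job_id) turns the status lookup into a seek-and-scan over one contiguous key range instead of a walk over every job. A minimal, self-contained sketch of the same access pattern, with an ordered map standing in for the RocksDB-backed table (StatusIndex and jobs_with_status are illustrative names; assumes the uuid crate with the v7 feature enabled):

    use std::collections::BTreeMap;

    use uuid::Uuid;

    // Stand-in for the DaJobStatusIndex table: ordered by (status, uuidv7).
    type StatusIndex = BTreeMap<(u8, Uuid), ()>;

    // Seek to (status, nil) and scan forward while the status byte matches,
    // mirroring the iterator logic of get_job_ids_by_status.
    fn jobs_with_status(index: &StatusIndex, status: u8) -> Vec<Uuid> {
        index
            .range((status, Uuid::nil())..=(status, Uuid::from_u128(u128::MAX)))
            .map(|((_, job_id), _)| *job_id)
            .collect() // uuidv7 keys come back in creation order
    }

    fn main() {
        let mut index = StatusIndex::new();
        index.insert((0, Uuid::now_v7()), ()); // Pending
        index.insert((1, Uuid::now_v7()), ()); // InProgress
        index.insert((0, Uuid::now_v7()), ()); // Pending
        assert_eq!(jobs_with_status(&index, 0).len(), 2);
    }

Because uuidv7 embeds a millisecond timestamp in its most significant bits, each per-status scan already yields ids in creation order, which is why get_all_active_job_ids only needs a plain sort() after merging the Pending and InProgress lists.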
/// This is required for removing from the utxo set and prevent selecting UTXOs twice #[instrument(level = "trace", skip_all, ret)] - pub(crate) fn get_pending_chunks(&self) -> Vec { + pub(crate) fn get_pending_chunks(&self) -> Result> { let mut txids = Vec::new(); - if let Ok(all_job_ids) = self.get_all_job_ids() { - for job_id in all_job_ids { - if let Ok(Some(progress)) = self.get_progress(&job_id) { - if matches!(progress.status, JobStatus::InProgress) { - txids.extend( - progress - .sent_chunks - .commit_txs - .iter() - .map(|tx| tx.compute_txid()), - ); - txids.extend( - progress - .sent_chunks - .reveal_txs - .iter() - .map(|tx| tx.compute_txid()), - ); - } + let active_job_ids = self.get_all_active_job_ids()?; + for job_id in active_job_ids { + if let Some(progress) = self.get_progress(&job_id)? { + if matches!(progress.status, JobStatus::InProgress) { + txids.extend( + progress + .sent_chunks + .commit_txs + .iter() + .map(|tx| tx.compute_txid()), + ); + txids.extend( + progress + .sent_chunks + .reveal_txs + .iter() + .map(|tx| tx.compute_txid()), + ); } } } - txids + Ok(txids) } /// Wait for job completion and return the transaction ID diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index e054816c0e..94a1b0fd7c 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -302,18 +302,13 @@ impl BitcoinService { }; // Get all pending/in-progress jobs - let all_job_ids = self.job_service.get_all_job_ids()?; + let active_job_ids = self.job_service.get_all_active_job_ids()?; let mut jobs_to_process = Vec::new(); - for job_id in all_job_ids { - if let Some(progress) = self.job_service.get_progress(&job_id)? { - match progress.status { - JobStatus::Pending | JobStatus::InProgress => { - if let Some(job) = self.job_service.get_job(&job_id)? { - jobs_to_process.push((job, progress)); - } - } - _ => {} // Skip completed/cancelled/failed + for job_id in active_job_ids { + if let Some(job) = self.job_service.get_job(&job_id)? { + if let Some(progress) = self.job_service.get_progress(&job_id)? { + jobs_to_process.push((job, progress)); } } } @@ -532,7 +527,7 @@ impl BitcoinService { // To make sure there are no conflicts between parallel utxos chain, // this additional filters out any UTXO used by queued txs and any change UTXO that are not finalized UtxoSelectionMode::Oldest => { - let txids = self.job_service.get_pending_chunks(); + let txids = self.job_service.get_pending_chunks()?; utxos.into_iter().filter(|utxo| { utxo.spendable @@ -1290,13 +1285,9 @@ impl DaService for BitcoinService { async fn send_transaction(&self, tx_request: DaTxRequest) -> Result { // TODO handle chaining job request if self.utxo_selection_mode == UtxoSelectionMode::Chained { - let all_job_ids = self.job_service.get_all_job_ids()?; - for job_id in all_job_ids { - if let Some(progress) = self.job_service.get_progress(&job_id)? 
{ - if matches!(progress.status, JobStatus::Pending | JobStatus::InProgress) { - return Err(BitcoinServiceError::PreviousJobInProgress); - } - } + let active_jobs = self.job_service.get_all_active_job_ids()?; + if !active_jobs.is_empty() { + return Err(BitcoinServiceError::PreviousJobInProgress); } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index f93d2f7ed3..1eb018fdb6 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -17,11 +17,11 @@ use crate::rocks_db_config::RocksdbConfig; use crate::schema::tables::TestTableNew; use crate::schema::tables::{ CommitmentIndicesByJobId, CommitmentIndicesByL1, CommitmentMerkleRoots, CommitmentsByNumber, - DaJobById, DaJobProgressById, ExecutedMigrations, JobIdOfCommitment, L2BlockByHash, - L2BlockByNumber, L2GenesisStateRoot, L2RangeByL1Height, L2StatusHeights, LastPrunedBlock, - LightClientProofBySlotNumber, MempoolTxs, PendingBonsaiSessionByJobId, PendingL1SubmissionJobs, - PendingProofs, PendingSequencerCommitments, ProofByJobId, ProverLastScannedSlot, - ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, + DaJobById, DaJobProgressById, DaJobStatusIndex, ExecutedMigrations, JobIdOfCommitment, + L2BlockByHash, L2BlockByNumber, L2GenesisStateRoot, L2RangeByL1Height, L2StatusHeights, + LastPrunedBlock, LightClientProofBySlotNumber, MempoolTxs, PendingBonsaiSessionByJobId, + PendingL1SubmissionJobs, PendingProofs, PendingSequencerCommitments, ProofByJobId, + ProverLastScannedSlot, ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, ShortHeaderProofBySlotHash, SlotByHash, StateDiffByBlockNumber, VerifiedBatchProofsBySlotNumber, LEDGER_TABLES, }; @@ -995,16 +995,35 @@ impl DaLedgerOps for LedgerDB { self.db.get::(job_id) } - fn all_jobs(&self) -> anyhow::Result> { - let mut iter = self.db.iter::()?; - iter.seek_to_first(); + fn insert_job_status_index(&self, status: u8, job_id: Uuid) -> anyhow::Result<()> { + let mut batch = SchemaBatch::new(); + batch.put::(&(status, job_id), &())?; + self.db.write_schemas(batch)?; + Ok(()) + } - let mut jobs = Vec::new(); - for job in iter { - let (job_id, _) = job?.into_tuple(); - jobs.push(job_id); - } + fn remove_job_status_index(&self, status: u8, job_id: Uuid) -> anyhow::Result<()> { + let mut batch = SchemaBatch::new(); + batch.delete::(&(status, job_id))?; + self.db.write_schemas(batch)?; + Ok(()) + } - Ok(jobs) + fn get_job_ids_by_status(&self, status: u8) -> anyhow::Result> { + let mut iter = self.db.iter::()?; + + iter.seek(&(status, Uuid::nil()))?; + + let mut job_ids = Vec::new(); + for item in iter { + let ((item_status, job_id), _) = item?.into_tuple(); + + if item_status != status { + break; + } + + job_ids.push(job_id); + } + Ok(job_ids) } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index fee140bf33..7c666459ee 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -327,18 +327,25 @@ pub trait SequencerLedgerOps: SharedLedgerOps { /// Bitcoin da ledger operations pub trait DaLedgerOps { + /// DaJobById related methods /// Insert a DA job by id fn insert_job(&self, job_id: Uuid, job: Vec) -> Result<()>; /// Get a DA job by id fn get_job(&self, job_id: &Uuid) -> Result>>; + /// 
DaJobProgressById related methods
    /// Update a DA job progress by id
    fn upsert_progress(&self, job_id: &Uuid, progress: Vec) -> Result<()>;

    /// Get a DA job progress by id
    fn get_progress(&self, job_id: &Uuid) -> Result>>;

-    /// Get all DA job ids
-    fn all_jobs(&self) -> Result>;
+    /// DaJobStatusIndex related methods
+    /// Insert a job status index entry
+    fn insert_job_status_index(&self, status: u8, job_id: Uuid) -> Result<()>;
+    /// Remove a job status index entry
+    fn remove_job_status_index(&self, status: u8, job_id: Uuid) -> Result<()>;
+    /// Get all job ids for a specific status
+    fn get_job_ids_by_status(&self, status: u8) -> Result>;
 }

 /// Test ledger operations
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
index 79f2644fd0..dc9a618da1 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
@@ -45,6 +45,7 @@ pub const SEQUENCER_LEDGER_TABLES: &[&str] = &[
     CommitmentsByNumber::table_name(),
     DaJobById::table_name(),
     DaJobProgressById::table_name(),
+    DaJobStatusIndex::table_name(),
     ExecutedMigrations::table_name(),
     L2BlockByHash::table_name(),
     L2BlockByNumber::table_name(),
@@ -97,6 +98,7 @@ pub const BATCH_PROVER_LEDGER_TABLES: &[&str] = &[
     CommitmentIndicesByL1::table_name(),
     DaJobById::table_name(),
     DaJobProgressById::table_name(),
+    DaJobStatusIndex::table_name(),
     ExecutedMigrations::table_name(),
     JobIdOfCommitment::table_name(),
     L2BlockByHash::table_name(),
@@ -148,6 +150,7 @@ pub const LEDGER_TABLES: &[&str] = &[
     CommitmentsByNumber::table_name(),
     DaJobById::table_name(),
     DaJobProgressById::table_name(),
+    DaJobStatusIndex::table_name(),
     ExecutedMigrations::table_name(),
     JobIdOfCommitment::table_name(),
     L2BlockByHash::table_name(),
@@ -509,6 +512,11 @@ define_table_with_seek_key_codec!(
     (DaJobProgressById) Uuid => Vec
 );

+define_table_with_seek_key_codec!(
+    /// Index by (status, jobid)
+    (DaJobStatusIndex) (u8, Uuid) => ()
+);
+
 #[cfg(test)]
 define_table_with_seek_key_codec!(
     /// Test table old
From d0682f0940b8795c5840616243fbfb53533be799 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 8 Oct 2025 13:33:04 +0100
Subject: [PATCH 15/81] Fetch fee rate right before funding tx

---
 crates/bitcoin-da/src/service.rs | 26 ++++----------------------
 1 file changed, 4 insertions(+), 22 deletions(-)

diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index 94a1b0fd7c..ed6a1c8182 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -287,20 +287,6 @@ impl BitcoinService {

     // Process job queue
     async fn process_job_service(&self) -> Result<()> {
-        let fee_rate_multiplier = self.fee.base_fee_rate_multiplier();
-
-        let fee_sat_per_vbyte = loop {
-            match self.fee.get_fee_rate().await {
-                Ok(rate) => {
-                    break (rate as f64 * fee_rate_multiplier).ceil() as u64;
-                }
-                Err(e) => {
-                    error!(?e, "Failed to call get_fee_rate. 
Retrying..."); - tokio::time::sleep(Duration::from_secs(1)).await; - } - } - }; - // Get all pending/in-progress jobs let active_job_ids = self.job_service.get_all_active_job_ids()?; let mut jobs_to_process = Vec::new(); @@ -319,12 +305,7 @@ impl BitcoinService { info!("Processing job {}", job.id); match self - .process_job( - &job, - &mut progress, - fee_sat_per_vbyte, - previous_job_was_partially_sent, - ) + .process_job(&job, &mut progress, previous_job_was_partially_sent) .await { Ok(completed) => { @@ -356,7 +337,6 @@ impl BitcoinService { &self, job: &Job, progress: &mut JobProgress, - fee_sat_per_vbyte: u64, previous_job_was_partially_sent: bool, ) -> Result { info!( @@ -376,7 +356,8 @@ impl BitcoinService { // get all available utxos let utxos = self.get_utxos().await?; - let current_idx = progress.sent_chunks.count(); + /// Get current fee rate as sat/vb + let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; let da_txs = self .create_da_transactions_with_fee_rate( @@ -388,6 +369,7 @@ impl BitcoinService { ) .await?; + let current_idx = progress.sent_chunks.count(); let signed_txs = self .tx_signer .sign_da_txs(da_txs.clone(), current_idx) From bb1525f2cee6a470879240d760c25cd5629898c3 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 8 Oct 2025 13:41:18 +0100 Subject: [PATCH 16/81] Fix lint --- crates/bitcoin-da/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index ed6a1c8182..3c60ee13e8 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -356,7 +356,7 @@ impl BitcoinService { // get all available utxos let utxos = self.get_utxos().await?; - /// Get current fee rate as sat/vb + // Get current fee rate as sat/vb let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; let da_txs = self From 7c444152dd6c68b6e3ade2fc86ddad81c1a37660 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Thu, 9 Oct 2025 11:10:20 +0100 Subject: [PATCH 17/81] Rethink should_select_new_utxo logic --- bin/citrea/tests/bitcoin/da_queue.rs | 19 +++++++ bin/citrea/tests/bitcoin/utils.rs | 7 ++- crates/bitcoin-da/src/fee.rs | 19 ++++++- crates/bitcoin-da/src/job/service.rs | 16 ++++-- crates/bitcoin-da/src/service.rs | 62 +++++++++------------- crates/sequencer/src/commitment/service.rs | 3 -- 6 files changed, 79 insertions(+), 47 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 913c58ef73..d760b0f23f 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -62,25 +62,32 @@ impl DaTransactionQueueingTest { // Fill mempool for i in 1..=3 { + println!("i : {:?}", i); da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; da.wait_mempool_len(8 * i, None).await?; } + println!("22"); da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; + println!("223"); // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit // The three first proofs should hit the mempool + 1 chunk da.wait_mempool_len(8 * 3 + 2, None).await?; + + println!("33"); assert_eq!(da.get_raw_mempool().await?.len(), 26); + println!("44"); // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; assert_eq!(monitored_txs.len(), 26); + println!("55"); // Try to send when queue is 
already filled up. // This is to test that utxos is correctly selected and that it's doesn't hang on waiting for list of queued txids to be returned let res = da_service @@ -92,8 +99,11 @@ impl DaTransactionQueueingTest { Err(BitcoinServiceError::PreviousJobInProgress) )); + println!("66"); + da.generate(1).await?; + println!("77"); // We mine the first three proofs + the 1 chunk pair and make sure that the remaining chunks and aggregate // and the extra proof is properly queued and sent on next block when mempool size is freed // Assert that all chunks were mined and mempool space is freed @@ -106,18 +116,23 @@ impl DaTransactionQueueingTest { assert_eq!(relevant_txs.len(), 13); + println!("88"); tokio::time::sleep(std::time::Duration::from_secs(3)).await; // Send additional proof and make sure it doesn't hit PreviousJobInProgress error let res = da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await; + println!("99"); assert!(res.is_ok()); + assert_eq!(da.get_raw_mempool().await?.len(), 8 + 6); // Remaining chunks and aggregate + extra queued proof should now hit the mempool da.wait_mempool_len(8 + 6, None).await?; + println!("1010"); assert_eq!(da.get_raw_mempool().await?.len(), 8 + 6); da.generate(1).await?; + println!("1111"); assert_eq!(da.get_raw_mempool().await?.len(), 0); let height = da.get_block_count().await?; @@ -126,7 +141,9 @@ impl DaTransactionQueueingTest { let (relevant_txs, _, _) = da_service.extract_relevant_blobs_with_proof(&block); assert_eq!(relevant_txs.len(), 7); + println!("1212"); da.generate(1).await?; + println!("1313"); Ok(()) } @@ -346,6 +363,7 @@ impl TestCase for DaTransactionQueueingTest { .header .state_root; + println!("1"); self.test_package_mempool_limits( da, &da_service, @@ -357,6 +375,7 @@ impl TestCase for DaTransactionQueueingTest { ) .await?; + println!("2"); self.test_package_too_large( da, &da_service, diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index 9be690675f..03eec6e231 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -232,7 +232,10 @@ pub async fn spawn_bitcoin_da_service( let fee_service = FeeService::new(client.clone(), network, da_config.mempool_space_url.clone()); - let ledger_db_path = test_dir.join("da_ledger_db"); + let ledger_db_dir = tempfile::TempDir::new() + .expect("Failed to create temporary directory") + .keep(); + let ledger_db_path = ledger_db_dir.join("da_ledger_db"); let rocksdb_config = RocksdbConfig::new(&ledger_db_path, None, None); let ledger_db = LedgerDB::with_config(&rocksdb_config).unwrap(); @@ -461,6 +464,7 @@ pub async fn generate_mock_txs( let wrong_key_str = "wrong_key"; let wrong_key_wallet = PathBuf::from_str(wrong_key_str).unwrap(); create_and_fund_wallet(wrong_key_str.to_string(), da_node).await; + let wrong_key_da_service = spawn_bitcoin_da_service( task_executor, &da_node.config, @@ -504,6 +508,7 @@ pub async fn generate_mock_txs( signatures_with_index, }; valid_method_ids.push(method_id.clone()); + da_service .send_transaction(DaTxRequest::BatchProofMethodId(method_id)) .await diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs index 42c8f1c3fd..7eaf8ce498 100644 --- a/crates/bitcoin-da/src/fee.rs +++ b/crates/bitcoin-da/src/fee.rs @@ -5,7 +5,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use bitcoin::{Amount, Network, Sequence, Txid}; +use bitcoin::{Amount, Network, Sequence, Transaction, Txid}; use bitcoincore_rpc::json::{ 
BumpFeeResult, CreateRawTransactionInput, EstimateMode, WalletCreateFundedPsbtOptions, }; @@ -14,6 +14,7 @@ use thiserror::Error; use tracing::{debug, instrument, trace, warn}; use crate::error::BitcoinServiceError; +use crate::job::service::SentChunks; use crate::monitoring::{MonitoredTx, MonitoredTxKind}; use crate::spec::utxo::UTXO; use crate::tx_signer::SignedTxPair; @@ -262,6 +263,7 @@ pub(crate) async fn get_fee_rate_from_mempool_space( pub(crate) fn validate_txs_fee_rate( txs: &[SignedTxPair], + sent_chunks: &SentChunks, fee_rate: u64, utxos: Vec, prev_utxo: Option, @@ -277,6 +279,21 @@ pub(crate) fn validate_txs_fee_rate( ); } + // Add sent chunks as available inputs + let get_tx_outputs = |txs: &[Transaction]| { + txs.iter() + .flat_map(|tx| { + let txid = tx.compute_txid(); + tx.output + .iter() + .enumerate() + .map(move |(idx, out)| ((txid, idx as u32), out.value)) + }) + .collect::>() + }; + utxo_map.extend(get_tx_outputs(&sent_chunks.commit_txs)); + utxo_map.extend(get_tx_outputs(&sent_chunks.reveal_txs)); + for tx in txs { // Validate commit let commit_tx = &tx.commit.tx; diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index f23bc09124..0d6f6cf16a 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -242,14 +242,11 @@ impl DaJobService { reveals: Vec, ) -> Result<()> { progress.sent_chunks.extend(commits, reveals); - progress.status = JobStatus::InProgress; - progress.last_updated = get_timestamp(); - - self.upsert_progress(progress)?; - Ok(()) + self.update_job_status(progress, JobStatus::InProgress) } /// Get all pending commit and reveals txids. + /// /// This is required for removing from the utxo set and prevent selecting UTXOs twice #[instrument(level = "trace", skip_all, ret)] pub(crate) fn get_pending_chunks(&self) -> Result> { @@ -318,4 +315,13 @@ impl DaJobService { } } } + + /// Check if any job is in progress. 
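The get_tx_outputs closure added above is the load-bearing piece of this change: without it, a reveal input spending a commit output that is not yet mined would look like an unknown outpoint during stateless fee validation. A stripped-down sketch of the same (txid, vout) to value indexing, with toy types standing in for bitcoin::Transaction and Amount (TxLite, TxOutLite and index_outputs are illustrative names, not part of the patch):

    use std::collections::HashMap;

    // Toy stand-ins for bitcoin::TxOut / bitcoin::Transaction.
    struct TxOutLite { value: u64 }
    struct TxLite { txid: [u8; 32], outputs: Vec<TxOutLite> }

    // Index every output of every tx by (txid, vout), as validate_txs_fee_rate
    // now does for sent_chunks.commit_txs and sent_chunks.reveal_txs.
    fn index_outputs(txs: &[TxLite]) -> HashMap<([u8; 32], u32), u64> {
        txs.iter()
            .flat_map(|tx| {
                let txid = tx.txid; // copied out so the inner closure can move it
                tx.outputs
                    .iter()
                    .enumerate()
                    .map(move |(vout, out)| ((txid, vout as u32), out.value))
            })
            .collect()
    }

    fn main() {
        let tx = TxLite { txid: [0u8; 32], outputs: vec![TxOutLite { value: 546 }] };
        assert_eq!(index_outputs(&[tx]).get(&([0u8; 32], 0)), Some(&546));
    }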
+ pub async fn has_job_in_progress(&self) -> Result { + let in_progress_jobs = self + .ledger_db + .get_job_ids_by_status(JobStatus::InProgress.as_u8())?; + + Ok(!in_progress_jobs.is_empty()) + } } diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 3c60ee13e8..ddc66c90aa 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -299,27 +299,19 @@ impl BitcoinService { } } - let mut previous_job_was_partially_sent = false; - for (job, mut progress) in jobs_to_process { info!("Processing job {}", job.id); - match self - .process_job(&job, &mut progress, previous_job_was_partially_sent) - .await - { + match self.process_job(&job, &mut progress).await { Ok(completed) => { if completed { info!("Job {} completed successfully", job.id); - previous_job_was_partially_sent = false; } else { info!("Job {} partially sent", job.id); - previous_job_was_partially_sent = true; } } Err(e) => { error!("Error processing job {}: {:?}", job.id, e); - previous_job_was_partially_sent = true; self.job_service.update_job_status( &mut progress, JobStatus::Failed { @@ -333,29 +325,20 @@ impl BitcoinService { Ok(()) } - async fn process_job( - &self, - job: &Job, - progress: &mut JobProgress, - previous_job_was_partially_sent: bool, - ) -> Result { + async fn process_job(&self, job: &Job, progress: &mut JobProgress) -> Result { info!( "Processing job {} with status {:?}", job.id, progress.status ); - if matches!(progress.status, JobStatus::Pending) { - self.job_service - .update_job_status(progress, JobStatus::InProgress)?; - } - - let prev_utxo = self - .select_prev_utxo(previous_job_was_partially_sent) - .await?; - // get all available utxos let utxos = self.get_utxos().await?; + let prev_utxo = match &progress.status { + JobStatus::InProgress => None, // Will use previous reveal utxo in create_inscription_type_1 + _ => self.select_prev_utxo(&utxos).await?, + }; + // Get current fee rate as sat/vb let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; @@ -379,7 +362,13 @@ impl BitcoinService { if !self.test_mempool_accept_queue_tx(&signed_txs).await? { // If it failed on mempool policy limit, it can also fail on meeting min relay fee // Stateless validation of signed txs fee - validate_txs_fee_rate(&signed_txs, fee_sat_per_vbyte, utxos, prev_utxo)?; + validate_txs_fee_rate( + &signed_txs, + &progress.sent_chunks, + fee_sat_per_vbyte, + utxos, + prev_utxo, + )?; } // backup to file after mempool acceptance @@ -436,6 +425,9 @@ impl BitcoinService { info!("Job {} marked as completed", job.id); } else if sent_count > 0 { // Job partially sent + self.job_service + .update_job_status(progress, JobStatus::InProgress)?; + info!( "Job {} progress recorded: {}/{} transactions sent", job.id, total_sent, total_needed @@ -445,9 +437,11 @@ impl BitcoinService { Ok(completed) } - async fn select_prev_utxo(&self, should_select_new_utxo: bool) -> Result> { + async fn select_prev_utxo(&self, utxos: &[UTXO]) -> Result> { let prev_utxo = self.get_prev_utxo().await; - if !should_select_new_utxo { + let job_in_progress = self.job_service.has_job_in_progress().await?; + + if !job_in_progress { return Ok(prev_utxo); } @@ -456,14 +450,8 @@ impl BitcoinService { // Prevent UTXO conflicts when queue is not empty and running UtxoSelectionMode::Chained mode Err(BitcoinServiceError::PreviousJobInProgress) } - UtxoSelectionMode::Oldest => Ok(if should_select_new_utxo { - // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. 
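get_highest_confirmation_utxo, just below, sorts the whole vector only to read its first element. An equivalent single-pass selection is sketched here for comparison (UtxoLite is an illustrative stand-in for the crate's UTXO type; the real code keeps the sort, which costs little at typical wallet sizes):

    #[derive(Clone, Debug, PartialEq)]
    struct UtxoLite { confirmations: u32 }

    // One O(n) pass instead of sort-then-first; on ties max_by_key returns
    // the last candidate, which is fine when any deepest-confirmed UTXO will do.
    fn highest_confirmation(utxos: &[UtxoLite]) -> Option<UtxoLite> {
        utxos.iter().max_by_key(|u| u.confirmations).cloned()
    }

    fn main() {
        let utxos = vec![UtxoLite { confirmations: 1 }, UtxoLite { confirmations: 7 }];
        assert_eq!(highest_confirmation(&utxos), Some(UtxoLite { confirmations: 7 }));
    }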
- - self.get_highest_confirmation_utxo().await? - } else { - // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain - prev_utxo - }), + // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. + UtxoSelectionMode::Oldest => self.get_highest_confirmation_utxo(utxos.to_vec()).await, } } @@ -535,8 +523,7 @@ impl BitcoinService { /// Returns the UTXO with the highest number of confirmations #[instrument(level = "trace", skip_all, ret)] - async fn get_highest_confirmation_utxo(&self) -> Result> { - let mut utxos = self.get_utxos().await?; + async fn get_highest_confirmation_utxo(&self, mut utxos: Vec) -> Result> { utxos.sort_by(|a, b| b.confirmations.cmp(&a.confirmations)); Ok(utxos.first().cloned()) } @@ -1268,6 +1255,7 @@ impl DaService for BitcoinService { // TODO handle chaining job request if self.utxo_selection_mode == UtxoSelectionMode::Chained { let active_jobs = self.job_service.get_all_active_job_ids()?; + if !active_jobs.is_empty() { return Err(BitcoinServiceError::PreviousJobInProgress); } diff --git a/crates/sequencer/src/commitment/service.rs b/crates/sequencer/src/commitment/service.rs index 84f53bbc29..0fc6b301e0 100644 --- a/crates/sequencer/src/commitment/service.rs +++ b/crates/sequencer/src/commitment/service.rs @@ -227,7 +227,6 @@ where .await .map_err(|e| anyhow!("Failed to submit job to DA {e}"))?; - println!("sent job"); info!( "Sent commitment to DA queue. L2 range: #{}-{}, index: {}", l2_start.0, l2_end.0, commitment_index, @@ -236,13 +235,11 @@ where let start = Instant::now(); let ledger_db = self.ledger_db.clone(); - println!("awaiting txid"); let _txid = self .da_service .wait_for_completion(job_id, None) .await .map_err(|e| anyhow!(e))?; - println!("awaited rx"); SM.send_commitment_execution.record( Instant::now() From f05d2cd32c973b1659cbdbaef8a476682a6323dc Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 10 Oct 2025 10:20:13 +0100 Subject: [PATCH 18/81] job RPCs --- bin/citrea/src/rollup/bitcoin.rs | 4 + crates/bitcoin-da/src/job/error.rs | 10 +- crates/bitcoin-da/src/job/mod.rs | 4 + crates/bitcoin-da/src/job/rpc.rs | 334 +++++++++++++++++++++++++++ crates/bitcoin-da/src/job/service.rs | 92 +++++++- crates/bitcoin-da/src/service.rs | 2 +- 6 files changed, 442 insertions(+), 4 deletions(-) diff --git a/bin/citrea/src/rollup/bitcoin.rs b/bin/citrea/src/rollup/bitcoin.rs index 45a2e7d3f1..358e55435c 100644 --- a/bin/citrea/src/rollup/bitcoin.rs +++ b/bin/citrea/src/rollup/bitcoin.rs @@ -4,6 +4,7 @@ use std::time::Duration; use async_trait::async_trait; use bitcoin_da::fee::FeeService; +use bitcoin_da::job::rpc::create_rpc_module as create_da_job_rpc_module; use bitcoin_da::monitoring::MonitoringService; use bitcoin_da::network_constants::get_network_constants; use bitcoin_da::rpc::create_rpc_module as create_da_rpc_module; @@ -80,6 +81,9 @@ impl RollupBlueprint for BitcoinRollup { let da_methods = create_da_rpc_module(da_service.clone()); rpc_methods.merge(da_methods)?; + let da_methods = create_da_job_rpc_module(da_service.clone()); + rpc_methods.merge(da_methods)?; + Ok(rpc_methods) } diff --git a/crates/bitcoin-da/src/job/error.rs b/crates/bitcoin-da/src/job/error.rs index 791ab2cd2b..9f992d2743 100644 --- a/crates/bitcoin-da/src/job/error.rs +++ b/crates/bitcoin-da/src/job/error.rs @@ -1,6 +1,6 @@ use thiserror::Error; -use crate::job::service::JobId; +use crate::job::service::{JobId, JobStatus}; /// Job errors #[derive(Error, 
Debug)] @@ -32,4 +32,12 @@ pub enum JobServiceError { /// Job was cancelled before completion #[error("Job {0} was cancelled")] JobCancelled(JobId), + + /// Job cancellation failure + #[error("Job {0} cannot be cancelled as it is in status: {1:?}")] + JobCancellationFailure(JobId, JobStatus), + + /// Job retry failure + #[error("Job {0} cannot be retried as it is in status: {1:?}")] + JobRetryFailure(JobId, JobStatus), } diff --git a/crates/bitcoin-da/src/job/mod.rs b/crates/bitcoin-da/src/job/mod.rs index 6f227d7af3..13ad1c8f53 100644 --- a/crates/bitcoin-da/src/job/mod.rs +++ b/crates/bitcoin-da/src/job/mod.rs @@ -4,9 +4,13 @@ //! Jobs are stored in the database by uuidv7 and processed chronologically. //! Supports partial sending of chunked transactions and recovery +use crate::job::error::JobServiceError; + /// Job related error types pub mod error; /// TODO: RPC API pub mod rpc; /// Core job queue implementation and state management pub mod service; + +type Result = std::result::Result; diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 8b13789179..603313cb9a 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -1 +1,335 @@ +//! Provides the RPC interface for the bitcoin-da job da. +//! The namespace for these RPC methods is "da" (Data Availability). +//! This module defines methods to interact with bitcoin-da jobs, +//! including cancelling, retrying and listing jobs. +use std::sync::Arc; + +use citrea_common::rpc::utils::internal_rpc_error; +use jsonrpsee::core::RpcResult; +use jsonrpsee::proc_macros::rpc; +use serde::{Deserialize, Serialize}; +use sov_db::ledger_db::DaLedgerOps; + +use super::Result; +use crate::job::service::{DaJobService, Job, JobId, JobProgress, JobStatus}; +use crate::service::BitcoinService; + +/// RPC provider trait for da job da +pub(super) trait DaJobRpcProvider { + /// Cancel a pending or in-progress job by job id + /// + /// # Arguments + /// * `job_id` - The job uuid + /// + /// # Returns + /// * `Ok(())` if the job was successfully cancelled + /// * `Err` if the job doesn't exist, is already completed, or cannot be cancelled + fn cancel_job(&self, job_id: JobId) -> Result<()>; + + /// Retry a failed or cancelled job by creating a new job with the same data + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job to retry + /// + /// # Returns + /// * `Ok(JobId)` - The ID of the newly created retry job + /// * `Err` if the job doesn't exist or is not in a retryable state + fn retry_job(&self, job_id: JobId) -> Result; + + /// List jobs with optional filtering and pagination + /// + /// # Arguments + /// * `filter` - Optional filter criteria for jobs + /// + /// # Returns + /// * `Ok(Vec)` - List of jobs matching the filter criteria + /// * `Err` on database or serialization errors + fn list_jobs(&self, filter: JobListFilter) -> Result>; + + /// Get detailed information about a specific job + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job + /// + /// # Returns + /// * `Ok(JobInfoResponse)` - Detailed information about the job + /// * `Err` on database error + fn get_job_info(&self, job_id: JobId) -> Result<(Job, JobProgress)>; +} + +/// Filter criteria for listing jobs +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct JobListFilter { + /// Optional status filter (e.g., only show "Pending" jobs) + pub status: Option, + /// Maximum number of jobs to return (default: 100, max: 1000) + pub limit: Option, + /// Skip first N jobs (for 
pagination) + pub offset: Option, +} + +impl Default for JobListFilter { + fn default() -> Self { + Self { + status: None, + limit: Some(100), + offset: None, + } + } +} + +/// Job status filter for RPC queries +#[derive(Default, Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum JobStatusFilter { + /// Only pending jobs + Pending, + /// Only in-progress jobs + InProgress, + /// Only completed jobs + Completed, + /// Only cancelled jobs + Cancelled, + /// Only failed jobs + Failed, + /// All active jobs (Pending + InProgress) + Active, + /// All terminal jobs (Completed + Cancelled + Failed) + Terminal, + /// All jobs + #[default] + All, +} + +impl JobStatusFilter { + /// Convert filter to list of status codes to query + pub(super) fn to_status_codes(&self) -> Vec { + match self { + JobStatusFilter::Pending => vec![JobStatus::Pending.as_u8()], + JobStatusFilter::InProgress => vec![JobStatus::InProgress.as_u8()], + JobStatusFilter::Completed => vec![JobStatus::Completed.as_u8()], + JobStatusFilter::Cancelled => vec![JobStatus::Cancelled.as_u8()], + JobStatusFilter::Failed => { + vec![JobStatus::Failed { + error: String::new(), + } + .as_u8()] + } + JobStatusFilter::Active => { + vec![JobStatus::Pending.as_u8(), JobStatus::InProgress.as_u8()] + } + JobStatusFilter::Terminal => vec![ + JobStatus::Completed.as_u8(), + JobStatus::Cancelled.as_u8(), + JobStatus::Failed { + error: String::new(), + } + .as_u8(), + ], + JobStatusFilter::All => vec![ + JobStatus::Pending.as_u8(), + JobStatus::InProgress.as_u8(), + JobStatus::Completed.as_u8(), + JobStatus::Cancelled.as_u8(), + JobStatus::Failed { + error: String::new(), + } + .as_u8(), + ], + } + } +} + +/// Detailed information about a job for RPC responses +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JobInfoResponse { + /// Unique job identifier + pub job_id: JobId, + /// Current job status + pub status: JobStatus, + /// Job creation timestamp (Unix seconds) + pub created_at: u64, + /// Last update timestamp (Unix seconds) + pub last_updated: u64, + /// Number of transactions already sent + pub sent_count: usize, + /// Error message if job failed + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +impl JobInfoResponse { + /// Create JobInfoResponse from Job and JobProgress + fn from_job_and_progress((job, progress): (Job, JobProgress)) -> Self { + let error = match &progress.status { + JobStatus::Failed { error } => Some(error.clone()), + _ => None, + }; + + Self { + job_id: job.id, + status: progress.status.clone(), + created_at: job.created_at, + last_updated: progress.last_updated, + sent_count: progress.sent_chunks.count(), + error, + } + } +} + +/// Response for job cancellation +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CancelJobResponse { + /// Whether the job was successfully cancelled + pub success: bool, +} + +/// Response for job retry +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RetryJobResponse { + /// uuid of the newly created retry job + pub new_job_id: JobId, + /// uuid of the original job that was retried + pub original_job_id: JobId, +} + +#[rpc(client, server, namespace = "daJob")] +pub trait DaJobRpc { + /// Cancels a pending or in-progress job. 
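Since the trait below is declared with #[rpc(client, server, ...)], jsonrpsee also generates a DaJobRpcClient extension trait, so these endpoints can be driven from any HTTP client. A hedged sketch of the cancel-then-retry flow as a caller sees it, modelled on the persistence test later in this series (the URL handling and use of anyhow are illustrative, not from the patch):

    use bitcoin_da::job::rpc::DaJobRpcClient; // generated from the #[rpc] macro
    use jsonrpsee::http_client::HttpClientBuilder;
    use uuid::Uuid;

    async fn cancel_then_retry(url: &str, job_id: Uuid) -> anyhow::Result<Uuid> {
        let client = HttpClientBuilder::default().build(url)?;

        // daJob_cancel: only Pending or InProgress jobs can be cancelled.
        let cancelled = client.da_job_cancel(job_id).await?;
        assert!(cancelled.success);

        // daJob_retry: re-submits the original payload as a fresh job.
        let retried = client.da_job_retry(job_id).await?;
        Ok(retried.new_job_id)
    }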
+ /// + /// # Arguments + /// * `job_id` - The unique identifier of the job to cancel + /// + /// # Returns + /// * Success response + /// + /// # Errors + /// * Job not found + /// * Job cannot be cancelled (already completed, failed, or cancelled) + #[method(name = "cancel")] + async fn da_job_cancel(&self, job_id: JobId) -> RpcResult; + + /// Retries a failed or cancelled job by creating a new job with the same data. + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job to retry + /// + /// # Returns + /// * Response containing the new job ID + /// + /// # Errors + /// * Job not found + /// * Job is not in a retryable state (pending, in-progress, or completed) + #[method(name = "retry")] + async fn da_job_retry(&self, job_id: JobId) -> RpcResult; + + /// Lists jobs with optional filtering and pagination. + /// + /// # Arguments + /// * `status` - Optional status filter (pending, inProgress, completed, cancelled, failed, active, terminal) + /// * `limit` - Maximum number of jobs to return (default: 100, max: 1000) + /// * `offset` - Number of jobs to skip for pagination (default: 0) + /// + /// # Returns + /// * List of job information matching the filter criteria + #[method(name = "list")] + async fn da_job_list( + &self, + status: Option, + limit: Option, + offset: Option, + ) -> RpcResult>; + + /// Gets detailed information about a specific job. + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job + /// + /// # Returns + /// * Detailed job information including status, timestamps, and progress + /// + /// # Errors + /// * Database error related errors + #[method(name = "get")] + async fn da_job_get_info(&self, job_id: JobId) -> RpcResult; +} + +/// The implementation of the RPC itself. +pub struct DaJobRpcServerImpl { + da: Arc, +} + +impl DaJobRpcServerImpl { + /// Create a new RPC server implementation + pub fn new(da: Arc) -> Self { + Self { da } + } +} + +#[async_trait::async_trait] +impl DaJobRpcServer for DaJobRpcServerImpl { + async fn da_job_cancel(&self, job_id: JobId) -> RpcResult { + self.da + .job_service + .cancel_job(job_id) + .map(|_| CancelJobResponse { success: true }) + .map_err(internal_rpc_error) + } + + async fn da_job_retry(&self, job_id: JobId) -> RpcResult { + self.da + .job_service + .retry_job(job_id) + .map(|new_job_id| RetryJobResponse { + new_job_id, + original_job_id: job_id, + }) + .map_err(internal_rpc_error) + } + + async fn da_job_list( + &self, + status: Option, + limit: Option, + offset: Option, + ) -> RpcResult> { + let filter = JobListFilter { + status, + limit, + offset, + }; + + Ok(self + .da + .job_service + .list_jobs(filter) + .map_err(internal_rpc_error)? + .into_iter() + .map(JobInfoResponse::from_job_and_progress) + .collect()) + } + + async fn da_job_get_info(&self, job_id: JobId) -> RpcResult { + self.da + .job_service + .get_job_info(job_id) + .map_err(internal_rpc_error) + .map(JobInfoResponse::from_job_and_progress) + } +} + +/// Creates a new RPC module for the DA Job da. 
+/// +/// # Arguments +/// * `da.job_service` - Arc reference to the job da +/// +/// # Returns +/// * JSON-RPC module ready to be merged into the server +pub fn create_rpc_module(da: Arc) -> jsonrpsee::RpcModule { + let server = DaJobRpcServerImpl::new(da); + DaJobRpcServer::into_rpc(server) +} diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 0d6f6cf16a..778ce87cde 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -6,15 +6,15 @@ use sov_db::ledger_db::DaLedgerOps; use tracing::{info, instrument}; use uuid::Uuid; +use super::Result; use crate::helpers::builders::body_builders::RawTxData; use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; +use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; /// Unique job id using uuidv7 for ordering by creation time pub(crate) type JobId = Uuid; -type Result = std::result::Result; - /// Job status representing the current state of transaction processing #[derive(Debug, Clone, Serialize, Deserialize)] pub enum JobStatus { @@ -325,3 +325,91 @@ impl DaJobService { Ok(!in_progress_jobs.is_empty()) } } + +/// Implementation of RPC provider methods +impl DaJobRpcProvider for DaJobService { + fn cancel_job(&self, job_id: JobId) -> Result<()> { + // Get job progress to check status + let mut progress = self + .get_progress(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + + // Only allow cancellation of pending or in-progress jobs + match progress.status { + JobStatus::Pending | JobStatus::InProgress => { + self.update_job_status(&mut progress, JobStatus::Cancelled)?; + tracing::info!("Job {job_id} succesfully cancelled"); + Ok(()) + } + JobStatus::Completed | JobStatus::Cancelled | JobStatus::Failed { .. } => Err( + JobServiceError::JobCancellationFailure(job_id, progress.status), + ), + } + } + + fn retry_job(&self, job_id: JobId) -> Result { + // Get job progress to check status + let progress = self + .get_progress(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + + // Only allow retry of failed or cancelled jobs + match progress.status { + JobStatus::Failed { .. } | JobStatus::Cancelled => { + // Get original job to retrieve raw tx data + let original_job = self + .get_job(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + // Create new job with same data + let new_job = self.submit_job(original_job.data)?; + tracing::info!("Job {job_id} retried as new job {}", new_job.id); + Ok(new_job.id) + } + JobStatus::Pending | JobStatus::InProgress | JobStatus::Completed => { + Err(JobServiceError::JobRetryFailure(job_id, progress.status)) + } + } + } + + fn list_jobs(&self, filter: JobListFilter) -> Result> { + let limit = filter.limit.unwrap_or(25).min(1000); // Defaults to 25, capped at 1000 + let offset = filter.offset.unwrap_or(0); + + // Get job ids based on status filter + let status_filter = filter.status.unwrap_or_default(); + + let mut job_ids = Vec::new(); + for code in status_filter.to_status_codes() { + job_ids.extend(self.ledger_db.get_job_ids_by_status(code)?); + } + job_ids.sort(); // sort chronologically by uuidv7 + + // Apply pagination + // TODO paginate at the db level. 
This should be sufficient for now as we take/skip on uuid before fetching job info + let job_ids: Vec<_> = job_ids.into_iter().skip(offset).take(limit).collect(); + + // Return (job, progress) per id + let mut job_infos = Vec::new(); + for job_id in job_ids { + if let (Some(job), Some(progress)) = + (self.get_job(&job_id)?, self.get_progress(&job_id)?) + { + job_infos.push((job, progress)); + } + } + + Ok(job_infos) + } + + fn get_job_info(&self, job_id: JobId) -> Result<(Job, JobProgress)> { + let job = self + .get_job(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + + let progress = self + .get_progress(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + + Ok((job, progress)) + } +} diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index ddc66c90aa..3991beba60 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -169,7 +169,7 @@ pub struct BitcoinService { utxo_selection_mode: UtxoSelectionMode, // Persistent job queue - job_service: DaJobService, + pub(crate) job_service: DaJobService, } impl BitcoinService { From b16bda038f76a2fbe550e4e4b512f23ac21c28ad Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 10 Oct 2025 10:20:27 +0100 Subject: [PATCH 19/81] Cleanup --- bin/citrea/tests/bitcoin/da_queue.rs | 17 ----------------- crates/bitcoin-da/src/job/rpc.rs | 3 +-- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index d760b0f23f..710e6d5e15 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -62,32 +62,26 @@ impl DaTransactionQueueingTest { // Fill mempool for i in 1..=3 { - println!("i : {:?}", i); da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; da.wait_mempool_len(8 * i, None).await?; } - println!("22"); da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; - println!("223"); // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit // The three first proofs should hit the mempool + 1 chunk da.wait_mempool_len(8 * 3 + 2, None).await?; - println!("33"); assert_eq!(da.get_raw_mempool().await?.len(), 26); - println!("44"); // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; assert_eq!(monitored_txs.len(), 26); - println!("55"); // Try to send when queue is already filled up. 
// This is to test that utxos is correctly selected and that it's doesn't hang on waiting for list of queued txids to be returned let res = da_service @@ -99,11 +93,8 @@ impl DaTransactionQueueingTest { Err(BitcoinServiceError::PreviousJobInProgress) )); - println!("66"); - da.generate(1).await?; - println!("77"); // We mine the first three proofs + the 1 chunk pair and make sure that the remaining chunks and aggregate // and the extra proof is properly queued and sent on next block when mempool size is freed // Assert that all chunks were mined and mempool space is freed @@ -116,23 +107,19 @@ impl DaTransactionQueueingTest { assert_eq!(relevant_txs.len(), 13); - println!("88"); tokio::time::sleep(std::time::Duration::from_secs(3)).await; // Send additional proof and make sure it doesn't hit PreviousJobInProgress error let res = da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await; - println!("99"); assert!(res.is_ok()); assert_eq!(da.get_raw_mempool().await?.len(), 8 + 6); // Remaining chunks and aggregate + extra queued proof should now hit the mempool da.wait_mempool_len(8 + 6, None).await?; - println!("1010"); assert_eq!(da.get_raw_mempool().await?.len(), 8 + 6); da.generate(1).await?; - println!("1111"); assert_eq!(da.get_raw_mempool().await?.len(), 0); let height = da.get_block_count().await?; @@ -141,9 +128,7 @@ impl DaTransactionQueueingTest { let (relevant_txs, _, _) = da_service.extract_relevant_blobs_with_proof(&block); assert_eq!(relevant_txs.len(), 7); - println!("1212"); da.generate(1).await?; - println!("1313"); Ok(()) } @@ -363,7 +348,6 @@ impl TestCase for DaTransactionQueueingTest { .header .state_root; - println!("1"); self.test_package_mempool_limits( da, &da_service, @@ -375,7 +359,6 @@ impl TestCase for DaTransactionQueueingTest { ) .await?; - println!("2"); self.test_package_too_large( da, &da_service, diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 603313cb9a..88e960bf5f 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -9,10 +9,9 @@ use citrea_common::rpc::utils::internal_rpc_error; use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use serde::{Deserialize, Serialize}; -use sov_db::ledger_db::DaLedgerOps; use super::Result; -use crate::job::service::{DaJobService, Job, JobId, JobProgress, JobStatus}; +use crate::job::service::{Job, JobId, JobProgress, JobStatus}; use crate::service::BitcoinService; /// RPC provider trait for da job da From 635a477d61a8a95d8b65f0b25b30348e019a8017 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 10 Oct 2025 14:00:22 +0100 Subject: [PATCH 20/81] Cleanup database error propagation --- crates/bitcoin-da/src/job/service.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 778ce87cde..f259c86682 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -16,7 +16,7 @@ use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; pub(crate) type JobId = Uuid; /// Job status representing the current state of transaction processing -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum JobStatus { /// Job is queued and waiting to be processed Pending, @@ -162,8 +162,7 @@ impl DaJobService { pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { let job = self 
.ledger_db - .get_job(job_id) - .map_err(JobServiceError::DatabaseError)? + .get_job(job_id)? .map(|v| bincode::deserialize(&v)) .transpose()?; Ok(job) @@ -183,8 +182,7 @@ impl DaJobService { pub(crate) fn get_progress(&self, job_id: &JobId) -> Result> { let progress = self .ledger_db - .get_progress(job_id) - .map_err(JobServiceError::DatabaseError)? + .get_progress(job_id)? .map(|v| bincode::deserialize(&v)) .transpose()?; Ok(progress) @@ -205,7 +203,7 @@ impl DaJobService { .get_job_ids_by_status(JobStatus::InProgress.as_u8())?, ); - // Sort uuidv7 chronogically + // Sort uuidv7 chronologically active_jobs.sort(); Ok(active_jobs) @@ -338,7 +336,7 @@ impl DaJobRpcProvider for DaJobService { match progress.status { JobStatus::Pending | JobStatus::InProgress => { self.update_job_status(&mut progress, JobStatus::Cancelled)?; - tracing::info!("Job {job_id} succesfully cancelled"); + tracing::info!("Job {job_id} successfully cancelled"); Ok(()) } JobStatus::Completed | JobStatus::Cancelled | JobStatus::Failed { .. } => Err( From b16771dd40e489bca949fa51e7ae3f14f82fdc49 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 13 Oct 2025 11:01:16 +0100 Subject: [PATCH 21/81] Persistency test --- bin/citrea/tests/bitcoin/bitcoin_job.rs | 574 ++++++++++++++++++++++++ bin/citrea/tests/bitcoin/mod.rs | 1 + bin/citrea/tests/bitcoin/utils.rs | 75 +++- crates/bitcoin-da/src/job/rpc.rs | 33 +- crates/common/src/rpc/server.rs | 1 + 5 files changed, 653 insertions(+), 31 deletions(-) create mode 100644 bin/citrea/tests/bitcoin/bitcoin_job.rs diff --git a/bin/citrea/tests/bitcoin/bitcoin_job.rs b/bin/citrea/tests/bitcoin/bitcoin_job.rs new file mode 100644 index 0000000000..9354f16fda --- /dev/null +++ b/bin/citrea/tests/bitcoin/bitcoin_job.rs @@ -0,0 +1,574 @@ +use std::sync::Arc; +use std::time::Duration; + +use alloy_primitives::{U32, U64}; +use async_trait::async_trait; +use bitcoin::hashes::Hash; +use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter, RetryJobResponse}; +use bitcoin_da::job::service::JobStatus; +use bitcoin_da::service::BitcoinService; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::bitcoin::{BitcoinNode, DEFAULT_FINALITY_DEPTH}; +use citrea_e2e::config::{BitcoinConfig, LightClientProverConfig, TestCaseConfig}; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use citrea_e2e::Result; +use citrea_light_client_prover::rpc::LightClientProverRpcClient; +use jsonrpsee::http_client::HttpClient; +use reth_tasks::TaskManager; +use sov_ledger_rpc::LedgerRpcClient; +use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::services::da::DaService; +use tokio::time::sleep; + +use super::get_citrea_path; +use crate::bitcoin::full_node::create_serialized_fake_receipt_batch_proof_with_state_roots; +use crate::bitcoin::light_client_test::create_random_state_diff; +use crate::bitcoin::utils::spawn_bitcoin_da_prover_service_with_rpc_server; + +struct JobServiceTest { + task_manager: Option, +} + +impl JobServiceTest { + #[allow(clippy::too_many_arguments)] + async fn test_job_lifecycle( + &self, + da: &BitcoinNode, + da_service: &BitcoinService, + da_service_client: &HttpClient, + genesis_state_root: [u8; 32], + batch_proof_method_id: [u32; 8], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let state_diff = create_random_state_diff(10); + let l1_hash = 
da.get_block_hash(finalized_height).await?;
+
+        let proof = create_serialized_fake_receipt_batch_proof_with_state_roots(
+            genesis_state_root,
+            20,
+            batch_proof_method_id,
+            Some(state_diff),
+            false,
+            l1_hash.as_raw_hash().to_byte_array(),
+            vec![commitment.clone()],
+            vec![commitment_state_root],
+            None,
+        );
+
+        // Make sure we start with no jobs
+        let all_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::All), None, None)
+            .await?;
+        assert!(all_jobs.is_empty());
+
+        let job_id = da_service
+            .send_transaction(DaTxRequest::ZKProof(proof))
+            .await?;
+
+        da.wait_mempool_len(2, None).await?;
+        da.generate(1).await?;
+
+        // Check that the job is no longer active and has been processed
+        let active_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::Active), None, None)
+            .await?;
+        assert!(active_jobs.is_empty());
+
+        // Check Completed status
+        let completed_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::Completed), None, None)
+            .await?;
+        assert_eq!(completed_jobs.len(), 1);
+
+        let completed_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::Terminal), None, None)
+            .await?;
+        assert_eq!(completed_jobs.len(), 1);
+
+        let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?;
+
+        assert_eq!(job_by_id.status, JobStatus::Completed);
+        assert_eq!(job_by_id.sent_count, 1);
+        assert_eq!(job_by_id.error, None);
+
+        Ok(())
+    }
+
+    /// Test job cancellation for in-progress jobs
+    /// Test job retry for cancelled jobs
+    #[allow(clippy::too_many_arguments)]
+    async fn test_job_cancellation_and_retry(
+        &self,
+        da: &BitcoinNode,
+        da_service: &BitcoinService,
+        da_service_client: &HttpClient,
+        genesis_state_root: [u8; 32],
+        batch_proof_method_id: [u32; 8],
+        finalized_height: u64,
+        commitment: &SequencerCommitment,
+        commitment_state_root: [u8; 32],
+    ) -> Result<()> {
+        let l1_hash = da.get_block_hash(finalized_height).await?;
+
+        // Create a 400kb proof that will hit mempool limits and get stuck in progress
+        let state_diff_400kb = create_random_state_diff(400);
+        let proof = create_serialized_fake_receipt_batch_proof_with_state_roots(
+            genesis_state_root,
+            20,
+            batch_proof_method_id,
+            Some(state_diff_400kb),
+            false,
+            l1_hash.as_raw_hash().to_byte_array(),
+            vec![commitment.clone()],
+            vec![commitment_state_root],
+            None,
+        );
+
+        let job_id = da_service
+            .send_transaction(DaTxRequest::ZKProof(proof.clone()))
+            .await?;
+
+        // Last tx chunk should hit the mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit
+        // The first three proofs should hit the mempool + 1 chunk
+        da.wait_mempool_len(18, None).await?;
+
+        assert_eq!(da.get_raw_mempool().await?.len(), 18);
+
+        let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?;
+        assert_eq!(job_by_id.status, JobStatus::InProgress);
+        assert_eq!(job_by_id.sent_count, 9); // 9 commit/reveal pairs
+
+        // Cancel job
+        let cancel_job_response = da_service_client.da_job_cancel(job_id).await?;
+        assert_eq!(cancel_job_response.success, true);
+
+        let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?;
+        assert_eq!(job_by_id.status, JobStatus::Cancelled);
+
+        // Mine sent txs
+        da.generate(1).await?;
+
+        // Make sure the job doesn't get processed after space is freed in the mempool
+        let res = da_service
+            .wait_for_completion(job_id, Some(Duration::from_secs(5)))
+            .await;
+        assert!(res.is_err());
+
+        let retry_job_response: RetryJobResponse = da_service_client.da_job_retry(job_id).await?;
+
+        let old_job_by_id: JobInfoResponse =
da_service_client.da_job_get_info(job_id).await?;
+        assert_eq!(old_job_by_id.status, JobStatus::Cancelled);
+
+        let new_job_by_id: JobInfoResponse = da_service_client
+            .da_job_get_info(retry_job_response.new_job_id)
+            .await?;
+        assert_eq!(new_job_by_id.status, JobStatus::Pending);
+        da.generate(1).await?;
+
+        // Last tx chunk should hit the mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit
+        // The first three proofs should hit the mempool + 1 chunk
+        da.wait_mempool_len(18, None).await?;
+
+        assert_eq!(da.get_raw_mempool().await?.len(), 18);
+
+        let new_job_by_id: JobInfoResponse = da_service_client
+            .da_job_get_info(retry_job_response.new_job_id)
+            .await?;
+        assert_eq!(new_job_by_id.status, JobStatus::InProgress);
+        da.generate(1).await?;
+
+        let res = da_service
+            .wait_for_completion(retry_job_response.new_job_id, None)
+            .await;
+        assert!(res.is_ok());
+
+        let new_job_by_id: JobInfoResponse = da_service_client
+            .da_job_get_info(retry_job_response.new_job_id)
+            .await?;
+        assert_eq!(new_job_by_id.status, JobStatus::Completed);
+
+        Ok(())
+    }
+
+    /// Test job listing with various filters and pagination
+    #[allow(clippy::too_many_arguments)]
+    async fn test_job_listing(
+        &self,
+        da: &BitcoinNode,
+        da_service: &BitcoinService,
+        da_service_client: &HttpClient,
+        genesis_state_root: [u8; 32],
+        batch_proof_method_id: [u32; 8],
+        finalized_height: u64,
+        commitment: &SequencerCommitment,
+        commitment_state_root: [u8; 32],
+    ) -> Result<()> {
+        let state_diff = create_random_state_diff(400);
+        let l1_hash = da.get_block_hash(finalized_height).await?;
+
+        let proof = create_serialized_fake_receipt_batch_proof_with_state_roots(
+            genesis_state_root,
+            20,
+            batch_proof_method_id,
+            Some(state_diff),
+            false,
+            l1_hash.as_raw_hash().to_byte_array(),
+            vec![commitment.clone()],
+            vec![commitment_state_root],
+            None,
+        );
+
+        // Create one more job; together with the jobs left over from the
+        // previous steps, this exercises list handling over multiple jobs
+        let job_id_1 = da_service
+            .send_transaction(DaTxRequest::ZKProof(proof.clone()))
+            .await?;
+
+        da.wait_mempool_len(18, None).await?;
+
+        // List all jobs
+        let all_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::All), None, None)
+            .await?;
+        assert!(all_jobs.len() >= 3);
+
+        // List active jobs
+        let active_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::Active), None, None)
+            .await?;
+        assert_eq!(active_jobs.len(), 1);
+
+        // List cancelled jobs
+        let cancelled_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::Cancelled), None, None)
+            .await?;
+        assert_eq!(cancelled_jobs.len(), 1);
+
+        // List failed jobs
+        let failed_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::Failed), None, None)
+            .await?;
+        assert_eq!(failed_jobs.len(), 0);
+
+        // Test pagination: first page
+        let first_page = da_service_client
+            .da_job_list(Some(JobStatusFilter::All), Some(1), Some(0))
+            .await?;
+        assert_eq!(first_page.len(), 1);
+
+        // Test pagination: second page
+        let second_page = da_service_client
+            .da_job_list(Some(JobStatusFilter::All), Some(1), Some(1))
+            .await?;
+        assert_eq!(second_page.len(), 1);
+
+        // Make sure we don't get the same job_id
+        assert_ne!(first_page[0].job_id, second_page[0].job_id);
+
+        // Verify uuidv7 chronological ordering
+        assert!(first_page[0].created_at <= second_page[0].created_at);
+
+        // Test limit
+        let limited_jobs = da_service_client
+            .da_job_list(Some(JobStatusFilter::All), Some(2), None)
+            .await?;
+        assert_eq!(limited_jobs.len(), 2);
+
+        // Mine all sent txs
+        da.generate(1).await?;
+
+        let res = da_service.wait_for_completion(job_id_1, None).await;
+
assert!(res.is_ok()); + + // Verify completed jobs + let completed_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::Completed), None, None) + .await?; + assert_eq!(completed_jobs.len(), 3); + + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn test_job_persistence( + &mut self, + da: &BitcoinNode, + da_service: Arc, + da_service_client: HttpClient, + genesis_state_root: [u8; 32], + batch_proof_method_id: [u32; 8], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let l1_hash = da.get_block_hash(finalized_height).await?; + let state_diff_400kb = create_random_state_diff(400); + let proof = create_serialized_fake_receipt_batch_proof_with_state_roots( + genesis_state_root, + 20, + batch_proof_method_id, + Some(state_diff_400kb), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + let job_id = da_service + .send_transaction(DaTxRequest::ZKProof(proof)) + .await?; + + da.wait_mempool_len(18, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let job_before: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(job_before.job_id, job_id); + assert_eq!(job_before.status, JobStatus::InProgress); + assert_eq!(job_before.sent_count, 9); + + let active_jobs_before = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_before.len(), 1); + assert_eq!(active_jobs_before[0].job_id, job_id); + + // Send graceful shutdown to da_service and drop da_service + drop(da_service); + drop(da_service_client); + self.task_manager.take().unwrap().graceful_shutdown(); + sleep(Duration::from_secs(5)).await; + + // Create a new task_manager as previous was consumed + self.task_manager = Some(TaskManager::current()); + let task_executor = self.task_manager.as_ref().unwrap().executor(); + + let (da_service, da_service_client) = spawn_bitcoin_da_prover_service_with_rpc_server( + &task_executor, + &da.config, + Self::test_config().dir, + ) + .await; + + let job_after: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + + assert_eq!(job_after.job_id, job_before.job_id); + assert_eq!(job_after.status, job_before.status); + assert_eq!(job_after.created_at, job_before.created_at); + assert_eq!(job_after.sent_count, job_before.sent_count); + + let active_jobs_after = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_after.len(), 1); + assert_eq!(active_jobs_after[0].job_id, job_id); + assert_eq!(active_jobs_after[0].status, JobStatus::InProgress); + + da.generate(1).await?; + + da.wait_mempool_len(6, None).await?; + let res = da_service.wait_for_completion(job_id, None).await; + assert!(res.is_ok()); + + let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(completed_job.status, JobStatus::Completed); + assert_eq!(completed_job.created_at, job_before.created_at); + assert_eq!(completed_job.error, None); + + let active_jobs_final = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_final.len(), 0); + + Ok(()) + } +} + +#[async_trait] +impl TestCase for JobServiceTest { + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_full_node: true, + with_sequencer: true, + with_light_client_prover: true, + ..Default::default() + } + } + + fn bitcoin_config() -> BitcoinConfig 
{ + BitcoinConfig { + extra_args: vec![ + "-persistmempool=0", + "-walletbroadcast=0", + "-limitancestorcount=100", + "-limitdescendantcount=100", + "-fallbackfee=0.00001", + ], + ..Default::default() + } + } + + fn scan_l1_start_height() -> Option { + Some(170) + } + + fn light_client_prover_config() -> LightClientProverConfig { + LightClientProverConfig { + initial_da_height: 171, + ..Default::default() + } + } + + async fn cleanup(self) -> Result<()> { + self.task_manager + .unwrap() + .graceful_shutdown_with_timeout(Duration::from_secs(1)); + Ok(()) + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let task_executor = self.task_manager.as_ref().unwrap().executor(); + let da = f.bitcoin_nodes.get_mut(0).unwrap(); + let sequencer = f.sequencer.as_mut().unwrap(); + let full_node = f.full_node.as_mut().unwrap(); + let light_client_prover = f.light_client_prover.as_mut().unwrap(); + + // Common setup + let (da_service, da_service_client) = spawn_bitcoin_da_prover_service_with_rpc_server( + &task_executor, + &da.config, + Self::test_config().dir, + ) + .await; + + let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); + + da.generate(DEFAULT_FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height(None).await?; + + light_client_prover + .wait_for_l1_height(finalized_height, None) + .await?; + + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(U64::from(finalized_height)) + .await?; + let lcp_output = lcp.unwrap().light_client_proof_output; + + let batch_proof_method_ids = light_client_prover + .client + .http_client() + .get_batch_proof_method_ids() + .await?; + let genesis_state_root = lcp_output.l2_state_root; + + // Generate sequencer commitment + for _ in 0..max_l2_blocks_per_commitment { + sequencer.client.send_publish_batch_request().await?; + } + + da.wait_mempool_len(2, None).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height(None).await?; + + full_node + .wait_for_l2_height(max_l2_blocks_per_commitment, None) + .await?; + full_node.wait_for_l1_height(finalized_height, None).await?; + + let commitment = full_node + .client + .http_client() + .get_sequencer_commitment_by_index(U32::from(1)) + .await? + .map(|c| SequencerCommitment { + merkle_root: c.merkle_root, + l2_end_block_number: c.l2_end_block_number.to::(), + index: c.index.to::(), + }) + .unwrap(); + + let commitment_state_root = sequencer + .client + .http_client() + .get_l2_block_by_number(U64::from(commitment.l2_end_block_number)) + .await? 
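+            // state root of the commitment's final L2 block; the fake batch
+            // proofs constructed in the test steps below commit to this root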
+ .unwrap() + .header + .state_root; + + let batch_proof_method_id: [u32; 8] = batch_proof_method_ids[0].method_id.into(); + + self.test_job_lifecycle( + da, + &da_service, + &da_service_client, + genesis_state_root, + batch_proof_method_id, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_cancellation_and_retry( + da, + &da_service, + &da_service_client, + genesis_state_root, + batch_proof_method_id, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_listing( + da, + &da_service, + &da_service_client, + genesis_state_root, + batch_proof_method_id, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_persistence( + da, + da_service, + da_service_client, + genesis_state_root, + batch_proof_method_id, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + Ok(()) + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_bitcoin_job_service() -> Result<()> { + TestCaseRunner::new(JobServiceTest { + task_manager: Some(TaskManager::current()), + }) + .set_citrea_path(get_citrea_path()) + .run() + .await +} diff --git a/bin/citrea/tests/bitcoin/mod.rs b/bin/citrea/tests/bitcoin/mod.rs index 1dd898a183..33a776a668 100644 --- a/bin/citrea/tests/bitcoin/mod.rs +++ b/bin/citrea/tests/bitcoin/mod.rs @@ -11,6 +11,7 @@ pub mod rollback; mod utils; // pub mod mempool_accept; pub mod backup; +pub mod bitcoin_job; pub mod bitcoin_service; pub mod bitcoin_test; pub mod bitcoin_verifier; diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index 03eec6e231..0c9a586b71 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -8,8 +8,10 @@ use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use anyhow::bail; use bitcoin_da::fee::FeeService; +use bitcoin_da::job::rpc::create_rpc_module as create_da_job_rpc_module; use bitcoin_da::monitoring::{MonitoringConfig, MonitoringService}; use bitcoin_da::network_constants::get_network_constants; +use bitcoin_da::rpc::create_rpc_module as create_da_rpc_module; use bitcoin_da::service::{ network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig, UtxoSelectionMode, }; @@ -17,6 +19,8 @@ use bitcoin_da::spec::block::BitcoinBlock; use bitcoin_da::spec::RollupParams; use bitcoincore_rpc::{Auth, Client, RpcApi}; use citrea_batch_prover::rpc::BatchProverRpcClient; +use citrea_common::rpc::server::start_rpc_server; +use citrea_common::RpcConfig; use citrea_e2e::bitcoin::BitcoinNode; use citrea_e2e::config::BitcoinConfig; use citrea_e2e::node::{BatchProver, FullNode, NodeKind}; @@ -26,6 +30,8 @@ use citrea_light_client_prover::circuit::{ SECURITY_COUNCIL_MEMBER_COUNT, }; use citrea_primitives::{MAX_TX_BODY_SIZE, REVEAL_TX_PREFIX}; +use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; +use jsonrpsee::RpcModule; use reth_tasks::TaskExecutor; use sov_db::ledger_db::LedgerDB; use sov_db::rocks_db_config::RocksdbConfig; @@ -144,6 +150,59 @@ pub async fn spawn_bitcoin_da_prover_service( .await } +pub async fn spawn_bitcoin_da_prover_service_with_rpc_server( + task_executor: &TaskExecutor, + config: &BitcoinConfig, + dir: PathBuf, +) -> (Arc, HttpClient) { + let service = spawn_bitcoin_da_service( + task_executor, + config, + dir, + 
DaServiceKeyKind::BatchProver, + REVEAL_TX_PREFIX.to_vec(), + None, + None, + ) + .await; + + let rpc_config = RpcConfig { + bind_host: "127.0.0.1".into(), + bind_port: 0, + max_connections: 100, + max_request_body_size: 10 * 1024 * 1024, + max_response_body_size: 10 * 1024 * 1024, + batch_requests_limit: 50, + enable_subscriptions: true, + max_subscriptions_per_connection: 100, + trace_chain_block_limit: None, + proving_jobs_limit: 100, + timeout: 30, + enable_js_tracer: true, + api_key: None, + }; + + // Add da rpc and da job rpc methods + let mut rpc_methods = RpcModule::new(()); + let da_methods = create_da_rpc_module(service.clone()); + rpc_methods.merge(da_methods).unwrap(); + + let da_methods = create_da_job_rpc_module(service.clone()); + rpc_methods.merge(da_methods).unwrap(); + + let (port_tx, port_rx) = tokio::sync::oneshot::channel(); + start_rpc_server(rpc_config, task_executor, rpc_methods, Some(port_tx)); + + let addr = port_rx.await.unwrap(); + let http_host = format!("http://localhost:{}", addr.port()); + let http_client = HttpClientBuilder::default() + .request_timeout(Duration::from_secs(120)) + .build(http_host) + .unwrap(); + + (service, http_client) +} + #[cfg(feature = "testing")] pub async fn spawn_bitcoin_da_prover_service_with_utxo_selection_mode( task_executor: &TaskExecutor, @@ -165,7 +224,7 @@ pub async fn spawn_bitcoin_da_prover_service_with_utxo_selection_mode( pub async fn spawn_bitcoin_da_service( task_executor: &TaskExecutor, - da_config: &BitcoinConfig, + bitcoin_config: &BitcoinConfig, test_dir: PathBuf, kind: DaServiceKeyKind, reveal_tx_prefix: Vec, @@ -179,9 +238,12 @@ pub async fn spawn_bitcoin_da_service( }; let wallet = wallet.unwrap_or(NodeKind::Bitcoin.to_string()); let da_config = BitcoinServiceConfig { - node_url: format!("http://127.0.0.1:{}/wallet/{}", da_config.rpc_port, wallet), - node_username: da_config.rpc_user.clone(), - node_password: da_config.rpc_password.clone(), + node_url: format!( + "http://127.0.0.1:{}/wallet/{}", + bitcoin_config.rpc_port, wallet + ), + node_username: bitcoin_config.rpc_user.clone(), + node_password: bitcoin_config.rpc_password.clone(), da_private_key: Some(da_private_key), tx_backup_dir: test_dir.join("tx_backup_dir").display().to_string(), monitoring: Some(MonitoringConfig { @@ -232,10 +294,7 @@ pub async fn spawn_bitcoin_da_service( let fee_service = FeeService::new(client.clone(), network, da_config.mempool_space_url.clone()); - let ledger_db_dir = tempfile::TempDir::new() - .expect("Failed to create temporary directory") - .keep(); - let ledger_db_path = ledger_db_dir.join("da_ledger_db"); + let ledger_db_path = bitcoin_config.data_dir.join("da_ledger_db"); let rocksdb_config = RocksdbConfig::new(&ledger_db_path, None, None); let ledger_db = LedgerDB::with_config(&rocksdb_config).unwrap(); diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 88e960bf5f..c2365a69f3 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -14,7 +14,7 @@ use super::Result; use crate::job::service::{Job, JobId, JobProgress, JobStatus}; use crate::service::BitcoinService; -/// RPC provider trait for da job da +/// RPC provider trait for da job service pub(super) trait DaJobRpcProvider { /// Cancel a pending or in-progress job by job id /// @@ -32,7 +32,7 @@ pub(super) trait DaJobRpcProvider { /// * `job_id` - The unique identifier of the job to retry /// /// # Returns - /// * `Ok(JobId)` - The ID of the newly created retry job + /// * `Ok(JobId)` - The uuid of the 
newly created retry job /// * `Err` if the job doesn't exist or is not in a retryable state fn retry_job(&self, job_id: JobId) -> Result; @@ -62,7 +62,7 @@ pub(super) trait DaJobRpcProvider { pub struct JobListFilter { /// Optional status filter (e.g., only show "Pending" jobs) pub status: Option, - /// Maximum number of jobs to return (default: 100, max: 1000) + /// Maximum number of jobs to return (default: 25, max: 1000) pub limit: Option, /// Skip first N jobs (for pagination) pub offset: Option, @@ -72,7 +72,7 @@ impl Default for JobListFilter { fn default() -> Self { Self { status: None, - limit: Some(100), + limit: Some(25), offset: None, } } @@ -148,9 +148,9 @@ pub struct JobInfoResponse { pub job_id: JobId, /// Current job status pub status: JobStatus, - /// Job creation timestamp (Unix seconds) + /// Job creation timestamp pub created_at: u64, - /// Last update timestamp (Unix seconds) + /// Last update timestamp pub last_updated: u64, /// Number of transactions already sent pub sent_count: usize, @@ -190,9 +190,9 @@ pub struct CancelJobResponse { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RetryJobResponse { - /// uuid of the newly created retry job + /// Uuid of the newly created retry job pub new_job_id: JobId, - /// uuid of the original job that was retried + /// Uuid of the original job that was retried pub original_job_id: JobId, } @@ -262,13 +262,6 @@ pub struct DaJobRpcServerImpl { da: Arc, } -impl DaJobRpcServerImpl { - /// Create a new RPC server implementation - pub fn new(da: Arc) -> Self { - Self { da } - } -} - #[async_trait::async_trait] impl DaJobRpcServer for DaJobRpcServerImpl { async fn da_job_cancel(&self, job_id: JobId) -> RpcResult { @@ -321,14 +314,8 @@ impl DaJobRpcServer for DaJobRpcServerImpl { } } -/// Creates a new RPC module for the DA Job da. -/// -/// # Arguments -/// * `da.job_service` - Arc reference to the job da -/// -/// # Returns -/// * JSON-RPC module ready to be merged into the server +/// Creates a new module for the bitcoin-da job service RPCs. 
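+///
+/// Usage sketch (mirrors the wiring in `bin/citrea/tests/bitcoin/utils.rs`;
+/// the `service` handle, `rpc_config`, executor and port channel are assumed
+/// to already exist):
+///
+/// ```ignore
+/// let mut rpc_methods = RpcModule::new(());
+/// rpc_methods.merge(create_rpc_module(service.clone()))?;
+/// start_rpc_server(rpc_config, task_executor, rpc_methods, Some(port_tx));
+/// ```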
pub fn create_rpc_module(da: Arc) -> jsonrpsee::RpcModule { - let server = DaJobRpcServerImpl::new(da); + let server = DaJobRpcServerImpl { da }; DaJobRpcServer::into_rpc(server) } diff --git a/crates/common/src/rpc/server.rs b/crates/common/src/rpc/server.rs index 3659ef7ec7..ad52955514 100644 --- a/crates/common/src/rpc/server.rs +++ b/crates/common/src/rpc/server.rs @@ -66,6 +66,7 @@ pub fn start_rpc_server( return; } }; + if let Some(channel) = channel { if let Err(e) = channel.send(bound_address) { error!("Could not send bound_address {}: {}", bound_address, e); From 57f4233a480c41fc468188bb6728da63bfa5c1d9 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 13 Oct 2025 11:27:37 +0100 Subject: [PATCH 22/81] Cleanup --- crates/bitcoin-da/src/job/rpc.rs | 6 +++--- crates/bitcoin-da/src/job/service.rs | 8 ++------ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index c2365a69f3..2295da779b 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -111,7 +111,7 @@ impl JobStatusFilter { JobStatusFilter::Cancelled => vec![JobStatus::Cancelled.as_u8()], JobStatusFilter::Failed => { vec![JobStatus::Failed { - error: String::new(), + error: Default::default(), } .as_u8()] } @@ -122,7 +122,7 @@ impl JobStatusFilter { JobStatus::Completed.as_u8(), JobStatus::Cancelled.as_u8(), JobStatus::Failed { - error: String::new(), + error: Default::default(), } .as_u8(), ], @@ -132,7 +132,7 @@ impl JobStatusFilter { JobStatus::Completed.as_u8(), JobStatus::Cancelled.as_u8(), JobStatus::Failed { - error: String::new(), + error: Default::default(), } .as_u8(), ], diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index f259c86682..c3a6aa3104 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -152,9 +152,7 @@ impl DaJobService { #[instrument(level = "trace", skip(self))] fn insert_job(&self, job: &Job) -> Result<()> { let value = bincode::serialize(job)?; - self.ledger_db - .insert_job(job.id, value) - .map_err(JobServiceError::DatabaseError) + Ok(self.ledger_db.insert_job(job.id, value)?) } /// Get a job by id @@ -172,9 +170,7 @@ impl DaJobService { #[instrument(level = "trace", skip(self))] pub(crate) fn upsert_progress(&self, progress: &JobProgress) -> Result<()> { let value = bincode::serialize(progress)?; - self.ledger_db - .upsert_progress(&progress.job_id, value) - .map_err(JobServiceError::DatabaseError) + Ok(self.ledger_db.upsert_progress(&progress.job_id, value)?) 
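+        // Plain `?` suffices here (and in `insert_job` above) because the
+        // ledger error converts into `JobServiceError::DatabaseError`, which
+        // is what the removed `map_err` calls were doing by hand.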
} /// Retrieve and deserialize job progress by id From 8c3a891c4787fc969d8a2d3e227ac705357c95dc Mon Sep 17 00:00:00 2001 From: Rakan Al-Huneiti Date: Mon, 13 Oct 2025 15:53:45 +0300 Subject: [PATCH 23/81] chore: da jobs schema types (#2966) --- .../src/helpers/builders/body_builders.rs | 2 +- crates/bitcoin-da/src/helpers/mod.rs | 2 + crates/bitcoin-da/src/job/error.rs | 5 +- crates/bitcoin-da/src/job/service.rs | 172 +++++++++++------- crates/bitcoin-da/src/service.rs | 35 ++-- .../full-node/db/sov-db/src/ledger_db/mod.rs | 13 +- .../db/sov-db/src/ledger_db/traits.rs | 9 +- .../full-node/db/sov-db/src/schema/tables.rs | 5 +- .../db/sov-db/src/schema/types/da_jobs.rs | 114 ++++++++++++ .../db/sov-db/src/schema/types/mod.rs | 2 + 10 files changed, 259 insertions(+), 100 deletions(-) create mode 100644 crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs index 6c235c1e40..5a08e205f0 100644 --- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs +++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs @@ -28,7 +28,7 @@ use crate::service::split_proof; use crate::spec::utxo::UTXO; use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD}; -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, borsh::BorshSerialize, borsh::BorshDeserialize)] /// These are real blobs we put on DA. pub(crate) enum RawTxData { /// borsh(DataOnDa::Complete(compress(Proof))) diff --git a/crates/bitcoin-da/src/helpers/mod.rs b/crates/bitcoin-da/src/helpers/mod.rs index 6e58568b66..61a97ed007 100644 --- a/crates/bitcoin-da/src/helpers/mod.rs +++ b/crates/bitcoin-da/src/helpers/mod.rs @@ -2,6 +2,7 @@ //! It includes transaction kind definitions, transaction builders, parsers, and Merkle tree utilities. 
use core::num::NonZero; +#[cfg(feature = "native")] use std::time::{SystemTime, UNIX_EPOCH}; use bitcoin::consensus::Encodable; @@ -71,6 +72,7 @@ impl TransactionKind { } /// Return UNIX timestamp in seconds +#[cfg(feature = "native")] pub(crate) fn get_timestamp() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) diff --git a/crates/bitcoin-da/src/job/error.rs b/crates/bitcoin-da/src/job/error.rs index 9f992d2743..875808415a 100644 --- a/crates/bitcoin-da/src/job/error.rs +++ b/crates/bitcoin-da/src/job/error.rs @@ -1,3 +1,4 @@ +use sov_db::schema::types::da_jobs::JobId; use thiserror::Error; use crate::job::service::{JobId, JobStatus}; @@ -18,8 +19,8 @@ pub enum JobServiceError { NoTransactionsFound(JobId), /// Failed to serialize or deserialize job data - #[error("Job serialization failed: {0}")] - SerializationError(#[from] bincode::Error), + #[error("Job borsh serialization failed: {0}")] + SerializationError(#[from] std::io::Error), /// Database operation failed #[error("Database error: {0}")] diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index c3a6aa3104..b648d7e613 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -3,8 +3,10 @@ use std::time::{Duration, Instant}; use bitcoin::{Transaction, Txid}; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::DaLedgerOps; +use sov_db::schema::types::da_jobs::{ + Job, JobId, JobProgress as DbJobProgress, JobStatus, SentChunks as DbSentChunks, +}; use tracing::{info, instrument}; -use uuid::Uuid; use super::Result; use crate::helpers::builders::body_builders::RawTxData; @@ -12,39 +14,7 @@ use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; -/// Unique job id using uuidv7 for ordering by creation time -pub(crate) type JobId = Uuid; - -/// Job status representing the current state of transaction processing -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum JobStatus { - /// Job is queued and waiting to be processed - Pending, - /// Job is in progress - InProgress, - /// Job completed successfully - Completed, - /// Job was cancelled before completion - Cancelled, - /// Job failed with error - Failed { - /// Error associated to the failure - error: String, - }, -} - -impl JobStatus { - /// u8 representation of `JobStatus` - pub fn as_u8(&self) -> u8 { - match self { - JobStatus::Pending => 0, - JobStatus::InProgress => 1, - JobStatus::Completed => 2, - JobStatus::Cancelled => 3, - JobStatus::Failed { .. } => 4, - } - } -} +type Result = std::result::Result; /// Tracks progress of a job including sent transactions for recovery. 
/// @@ -100,22 +70,72 @@ impl SentChunks { } } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub(crate) struct Job { - /// Job id as uuidv7 - pub id: JobId, - /// Raw job data - pub data: RawTxData, - /// Time of job creation - pub created_at: u64, +impl From for SentChunks { + fn from(db_chunks: DbSentChunks) -> Self { + let commit_txs = db_chunks + .commit_txs + .iter() + .map(|bytes| { + bitcoin::consensus::deserialize(bytes) + .expect("Failed to deserialize commit transaction from database") + }) + .collect(); + + let reveal_txs = db_chunks + .reveal_txs + .iter() + .map(|bytes| { + bitcoin::consensus::deserialize(bytes) + .expect("Failed to deserialize reveal transaction from database") + }) + .collect(); + + Self { + commit_txs, + reveal_txs, + } + } +} + +impl From for DbSentChunks { + fn from(chunks: SentChunks) -> Self { + let commit_txs = chunks + .commit_txs + .iter() + .map(bitcoin::consensus::serialize) + .collect(); + + let reveal_txs = chunks + .reveal_txs + .iter() + .map(bitcoin::consensus::serialize) + .collect(); + + Self { + commit_txs, + reveal_txs, + } + } +} + +impl From for JobProgress { + fn from(db_progress: DbJobProgress) -> Self { + Self { + job_id: db_progress.job_id, + status: db_progress.status, + sent_chunks: db_progress.sent_chunks.into(), + last_updated: db_progress.last_updated, + } + } } -impl Job { - pub(crate) fn new(data: RawTxData) -> Self { +impl From for DbJobProgress { + fn from(progress: JobProgress) -> Self { Self { - id: Uuid::now_v7(), - data, - created_at: get_timestamp(), + job_id: progress.job_id, + status: progress.status, + sent_chunks: progress.sent_chunks.into(), + last_updated: progress.last_updated, } } } @@ -133,11 +153,15 @@ impl DaJobService { /// Create a new job and save to db #[instrument(level = "trace", skip(self), ret)] - pub fn submit_job(&self, raw_tx_data: RawTxData) -> Result { - let job = Job::new(raw_tx_data); - let job_id = job.id; + pub fn submit_job(&self, raw_tx_data: RawTxData) -> Result { + let job_id = uuid::Uuid::now_v7(); + let created_at = get_timestamp(); + + // Serialize RawTxData to Vec + let data = borsh::to_vec(&raw_tx_data)?; - let progress = JobProgress::new(job_id, job.created_at); + let job = Job::new(job_id, data, created_at); + let progress = JobProgress::new(job_id, created_at); self.insert_job(&job)?; self.upsert_progress(&progress)?; @@ -145,43 +169,53 @@ impl DaJobService { .insert_job_status_index(progress.status.as_u8(), job_id)?; info!("Job {job_id} submitted and persisted"); - Ok(job) + Ok(job_id) } /// Save a new job to db #[instrument(level = "trace", skip(self))] fn insert_job(&self, job: &Job) -> Result<()> { - let value = bincode::serialize(job)?; - Ok(self.ledger_db.insert_job(job.id, value)?) + self.ledger_db + .insert_job(job.id, job) + .map_err(JobServiceError::DatabaseError) } - /// Get a job by id + /// Get a job by id, deserializing RawTxData #[instrument(level = "trace", skip(self), ret)] - pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { + pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { let job = self .ledger_db - .get_job(job_id)? 
- .map(|v| bincode::deserialize(&v)) - .transpose()?; - Ok(job) + .get_job(job_id) + .map_err(JobServiceError::DatabaseError)?; + + match job { + Some(j) => { + let raw_tx_data = borsh::from_slice(&j.data)?; + Ok(Some(raw_tx_data)) + } + None => Ok(None), + } } - /// Upsert job progress after serialization + /// Upsert job progress - convert local JobProgress to DB format #[instrument(level = "trace", skip(self))] pub(crate) fn upsert_progress(&self, progress: &JobProgress) -> Result<()> { - let value = bincode::serialize(progress)?; - Ok(self.ledger_db.upsert_progress(&progress.job_id, value)?) + let db_progress: DbJobProgress = progress.clone().into(); + + self.ledger_db + .upsert_progress(&progress.job_id, &db_progress) + .map_err(JobServiceError::DatabaseError) } - /// Retrieve and deserialize job progress by id + /// Retrieve job progress by id and convert to local format #[instrument(level = "trace", skip(self), ret)] pub(crate) fn get_progress(&self, job_id: &JobId) -> Result> { - let progress = self + let db_progress = self .ledger_db - .get_progress(job_id)? - .map(|v| bincode::deserialize(&v)) - .transpose()?; - Ok(progress) + .get_progress(job_id) + .map_err(JobServiceError::DatabaseError)?; + + Ok(db_progress.map(|p| p.into())) } /// Get all `Pending` and `InProgress` job ids from storage diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 3991beba60..480d31179d 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -29,6 +29,7 @@ use lru::LruCache; use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::LedgerDB; +use sov_db::schema::types::da_jobs::{JobId, JobStatus}; use sov_rollup_interface::da::{DaSpec, DaTxRequest, DataOnDa, SequencerCommitment}; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::zk::Proof; @@ -46,7 +47,7 @@ use crate::helpers::builders::TxWithId; use crate::helpers::merkle_tree::BitcoinMerkleTree; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed}; use crate::helpers::{merkle_tree, TransactionKind}; -use crate::job::service::{DaJobService, Job, JobId, JobProgress, JobStatus, SentChunks}; +use crate::job::service::{DaJobService, JobProgress, SentChunks}; use crate::metrics::BITCOIN_DA_METRICS as BM; use crate::monitoring::{MonitoredTxKind, MonitoringConfig, MonitoringService, TxStatus}; use crate::network_constants::NetworkConstants; @@ -292,26 +293,28 @@ impl BitcoinService { let mut jobs_to_process = Vec::new(); for job_id in active_job_ids { - if let Some(job) = self.job_service.get_job(&job_id)? { + if let Some(job_data) = self.job_service.get_job(&job_id)? { if let Some(progress) = self.job_service.get_progress(&job_id)? 
{ - jobs_to_process.push((job, progress)); + // get_progress returns LocalJobProgress directly + jobs_to_process.push((job_data, progress)); } } } - for (job, mut progress) in jobs_to_process { - info!("Processing job {}", job.id); + for (job_data, mut progress) in jobs_to_process { + let job_id = progress.job_id; + info!("Processing job {}", job_id); - match self.process_job(&job, &mut progress).await { + match self.process_job(&job_data, &mut progress).await { Ok(completed) => { if completed { - info!("Job {} completed successfully", job.id); + info!("Job {} completed successfully", job_id); } else { - info!("Job {} partially sent", job.id); + info!("Job {} partially sent", job_id); } } Err(e) => { - error!("Error processing job {}: {:?}", job.id, e); + error!("Error processing job {}: {:?}", job_id, e); self.job_service.update_job_status( &mut progress, JobStatus::Failed { @@ -325,10 +328,10 @@ impl BitcoinService { Ok(()) } - async fn process_job(&self, job: &Job, progress: &mut JobProgress) -> Result { + async fn process_job(&self, job_data: &RawTxData, progress: &mut JobProgress) -> Result { info!( "Processing job {} with status {:?}", - job.id, progress.status + progress.job_id, progress.status ); // get all available utxos @@ -347,7 +350,7 @@ impl BitcoinService { fee_sat_per_vbyte, utxos.clone(), prev_utxo.clone(), - job.data.clone(), + job_data.clone(), progress.sent_chunks.clone(), ) .await?; @@ -422,7 +425,7 @@ impl BitcoinService { self.job_service .update_job_status(progress, JobStatus::Completed)?; - info!("Job {} marked as completed", job.id); + info!("Job {} marked as completed", progress.job_id); } else if sent_count > 0 { // Job partially sent self.job_service @@ -430,7 +433,7 @@ impl BitcoinService { info!( "Job {} progress recorded: {}/{} transactions sent", - job.id, total_sent, total_needed + progress.job_id, total_sent, total_needed ); } @@ -1261,11 +1264,11 @@ impl DaService for BitcoinService { } } - let job = self.job_service.submit_job(tx_request.try_into()?)?; + let job_id = self.job_service.submit_job(tx_request.try_into()?)?; self.process_job_service().await?; - Ok(job.id) + Ok(job_id) } async fn wait_for_completion( diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 1eb018fdb6..63fdeedfd0 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -28,6 +28,7 @@ use crate::schema::tables::{ use crate::schema::types::batch_proof::{ StoredBatchProof, StoredBatchProofOutput, StoredVerifiedProof, }; +use crate::schema::types::da_jobs::{Job, JobProgress}; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::{StoredL2Block, StoredTransaction}; use crate::schema::types::light_client_proof::{ @@ -973,25 +974,25 @@ impl ForkMigration for LedgerDB { } impl DaLedgerOps for LedgerDB { - fn insert_job(&self, job_id: Uuid, job: Vec) -> anyhow::Result<()> { + fn insert_job(&self, job_id: Uuid, job: &Job) -> anyhow::Result<()> { let mut batch = SchemaBatch::new(); - batch.put::(&job_id, &job)?; + batch.put::(&job_id, job)?; self.db.write_schemas(batch)?; Ok(()) } - fn get_job(&self, job_id: &Uuid) -> anyhow::Result>> { + fn get_job(&self, job_id: &Uuid) -> anyhow::Result> { self.db.get::(job_id) } - fn upsert_progress(&self, job_id: &Uuid, progress: Vec) -> anyhow::Result<()> { + fn upsert_progress(&self, job_id: &Uuid, progress: &JobProgress) -> anyhow::Result<()> 
{ let mut batch = SchemaBatch::new(); - batch.put::(job_id, &progress)?; + batch.put::(job_id, progress)?; self.db.write_schemas(batch)?; Ok(()) } - fn get_progress(&self, job_id: &Uuid) -> anyhow::Result>> { + fn get_progress(&self, job_id: &Uuid) -> anyhow::Result> { self.db.get::(job_id) } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index 7c666459ee..2d57272039 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -12,6 +12,7 @@ use uuid::Uuid; use crate::schema::tables::{PendingProofs, PendingSequencerCommitments}; use crate::schema::types::batch_proof::{StoredBatchProof, StoredBatchProofOutput}; +use crate::schema::types::da_jobs::{Job, JobProgress}; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::StoredL2Block; use crate::schema::types::light_client_proof::{ @@ -329,15 +330,15 @@ pub trait SequencerLedgerOps: SharedLedgerOps { pub trait DaLedgerOps { /// DaJobById related methods /// Insert a DA job by id - fn insert_job(&self, job_id: Uuid, job: Vec) -> Result<()>; + fn insert_job(&self, job_id: Uuid, job: &Job) -> Result<()>; /// Get a DA job by id - fn get_job(&self, job_id: &Uuid) -> Result>>; + fn get_job(&self, job_id: &Uuid) -> Result>; /// DaJobProgressById related methods /// Update a DA job progress by id - fn upsert_progress(&self, job_id: &Uuid, progress: Vec) -> Result<()>; + fn upsert_progress(&self, job_id: &Uuid, progress: &JobProgress) -> Result<()>; /// Get a DA job progress by id - fn get_progress(&self, job_id: &Uuid) -> Result>>; + fn get_progress(&self, job_id: &Uuid) -> Result>; /// DaJobStatusIndex related methods /// Insert a job status index entry diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index dc9a618da1..e510fc0950 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -20,6 +20,7 @@ use sov_schema_db::{CodecError, SeekKeyEncoder}; use uuid::Uuid; use super::types::batch_proof::{StoredBatchProof, StoredVerifiedProof}; +use super::types::da_jobs::{Job, JobProgress}; use super::types::l2_block::StoredL2Block; use super::types::light_client_proof::StoredLightClientProof; use super::types::{ @@ -504,12 +505,12 @@ define_table_with_seek_key_codec!( define_table_with_seek_key_codec!( /// Da job by uuid - (DaJobById) Uuid => Vec + (DaJobById) Uuid => Job ); define_table_with_seek_key_codec!( /// Da job progress by uuid - (DaJobProgressById) Uuid => Vec + (DaJobProgressById) Uuid => JobProgress ); define_table_with_seek_key_codec!( diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs new file mode 100644 index 0000000000..840bf8b437 --- /dev/null +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -0,0 +1,114 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +/// Unique job id using uuidv7 for ordering by creation time +pub type JobId = Uuid; + +/// Job status representing the current state of transaction processing +#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +pub enum JobStatus { + /// Job is queued and waiting to be processed + 
Pending,
+    /// Job is in progress
+    InProgress,
+    /// Job completed successfully
+    Completed,
+    /// Job was cancelled before completion
+    Cancelled,
+    /// Job failed with error
+    Failed {
+        /// Error associated with the failure
+        error: String,
+    },
+}
+
+impl JobStatus {
+    /// u8 representation of `JobStatus`
+    pub fn as_u8(&self) -> u8 {
+        match self {
+            JobStatus::Pending => 0,
+            JobStatus::InProgress => 1,
+            JobStatus::Completed => 2,
+            JobStatus::Cancelled => 3,
+            JobStatus::Failed { .. } => 4,
+        }
+    }
+}
+
+/// Track sent chunks for partial sending and recovery
+#[derive(Debug, Default, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+pub struct SentChunks {
+    /// Sent commit txs (serialized bitcoin::Transaction)
+    pub commit_txs: Vec>,
+    /// Sent reveal txs (serialized bitcoin::Transaction)
+    pub reveal_txs: Vec>,
+}
+
+impl SentChunks {
+    /// Return a default SentChunks with empty vectors
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Return the number of sent chunks
+    pub fn count(&self) -> usize {
+        self.reveal_txs.len()
+    }
+
+    /// Extend with sent commit and reveal chunks
+    pub fn extend(&mut self, commits: Vec>, reveals: Vec>) {
+        self.commit_txs.extend(commits);
+        self.reveal_txs.extend(reveals);
+    }
+}
+
+/// Tracks progress of a job including sent transactions for recovery.
+///
+/// This state is persisted to the database and updated as transactions
+/// are sent to bitcoin da.
+#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+pub struct JobProgress {
+    /// Job id as uuidv7
+    pub job_id: JobId,
+    /// Current job status
+    pub status: JobStatus,
+    /// Partially sent commit/reveal chunks for partial sending and recovery
+    pub sent_chunks: SentChunks,
+    /// Last update timestamp
+    pub last_updated: u64,
+}
+
+impl JobProgress {
+    /// Create a new JobProgress with pending status
+    pub fn new(job_id: JobId, last_updated: u64) -> Self {
+        Self {
+            job_id,
+            status: JobStatus::Pending,
+            sent_chunks: SentChunks::new(),
+            last_updated,
+        }
+    }
+}
+
+/// DA Job representing a transaction to be sent to the DA layer
+#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+pub struct Job {
+    /// Job id as uuidv7
+    pub id: JobId,
+    /// Raw job data (serialized RawTxData)
+    pub data: Vec,
+    /// Time of job creation
+    pub created_at: u64,
+}
+
+impl Job {
+    /// Create a new job with the given serialized data
+    pub fn new(id: JobId, data: Vec, created_at: u64) -> Self {
+        Self {
+            id,
+            data,
+            created_at,
+        }
+    }
+}
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs
index a3f84e8cd2..1387fa39c1 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs
@@ -3,6 +3,8 @@ use sov_rollup_interface::zk::{Proof, ReceiptType};
 
 /// Batch proof related storage types
 pub mod batch_proof;
+/// DA job related storage types
+pub mod da_jobs;
 /// Job status
 pub mod job_status;
 /// L2 block related storage types

From eef70f629d1880241de8f43964c2e6c572e7ba81 Mon Sep 17 00:00:00 2001
From: Rakan Alhneiti
Date: Mon, 13 Oct 2025 16:00:50 +0300
Subject: [PATCH 24/81] Fix clippy issues

---
 crates/bitcoin-da/src/job/error.rs            |  4 +--
 crates/bitcoin-da/src/job/rpc.rs              |  3 +-
 crates/bitcoin-da/src/job/service.rs          | 34 +++++++-------------
 crates/bitcoin-da/src/service.rs              | 10 ++++--
 .../db/sov-db/src/schema/types/da_jobs.rs     | 23 -------------
 5 files
changed, 23 insertions(+), 51 deletions(-) diff --git a/crates/bitcoin-da/src/job/error.rs b/crates/bitcoin-da/src/job/error.rs index 875808415a..cd0794b6e7 100644 --- a/crates/bitcoin-da/src/job/error.rs +++ b/crates/bitcoin-da/src/job/error.rs @@ -1,8 +1,6 @@ -use sov_db::schema::types::da_jobs::JobId; +use sov_db::schema::types::da_jobs::{JobId, JobStatus}; use thiserror::Error; -use crate::job::service::{JobId, JobStatus}; - /// Job errors #[derive(Error, Debug)] pub enum JobServiceError { diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 2295da779b..dd09d7bd5c 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -9,9 +9,10 @@ use citrea_common::rpc::utils::internal_rpc_error; use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use serde::{Deserialize, Serialize}; +use sov_db::schema::types::da_jobs::Job; use super::Result; -use crate::job::service::{Job, JobId, JobProgress, JobStatus}; +use crate::job::service::{JobId, JobProgress, JobStatus}; use crate::service::BitcoinService; /// RPC provider trait for da job service diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index b648d7e613..7ac07c69d0 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -3,12 +3,10 @@ use std::time::{Duration, Instant}; use bitcoin::{Transaction, Txid}; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::DaLedgerOps; -use sov_db::schema::types::da_jobs::{ - Job, JobId, JobProgress as DbJobProgress, JobStatus, SentChunks as DbSentChunks, -}; +pub use sov_db::schema::types::da_jobs::{Job, JobId, JobStatus}; +use sov_db::schema::types::da_jobs::{JobProgress as DbJobProgress, SentChunks as DbSentChunks}; use tracing::{info, instrument}; -use super::Result; use crate::helpers::builders::body_builders::RawTxData; use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; @@ -180,21 +178,12 @@ impl DaJobService { .map_err(JobServiceError::DatabaseError) } - /// Get a job by id, deserializing RawTxData + /// Get a job by id #[instrument(level = "trace", skip(self), ret)] - pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { - let job = self - .ledger_db + pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { + self.ledger_db .get_job(job_id) - .map_err(JobServiceError::DatabaseError)?; - - match job { - Some(j) => { - let raw_tx_data = borsh::from_slice(&j.data)?; - Ok(Some(raw_tx_data)) - } - None => Ok(None), - } + .map_err(JobServiceError::DatabaseError) } /// Upsert job progress - convert local JobProgress to DB format @@ -384,14 +373,17 @@ impl DaJobRpcProvider for DaJobService { // Only allow retry of failed or cancelled jobs match progress.status { JobStatus::Failed { .. } | JobStatus::Cancelled => { - // Get original job to retrieve raw tx data + // Get original job and deserialize data let original_job = self .get_job(&job_id)? 
.ok_or(JobServiceError::JobNotFound(job_id))?; + + let raw_data: RawTxData = borsh::from_slice(&original_job.data)?; + // Create new job with same data - let new_job = self.submit_job(original_job.data)?; - tracing::info!("Job {job_id} retried as new job {}", new_job.id); - Ok(new_job.id) + let new_job_id = self.submit_job(raw_data)?; + tracing::info!("Job {job_id} retried as new job {new_job_id}"); + Ok(new_job_id) } JobStatus::Pending | JobStatus::InProgress | JobStatus::Completed => { Err(JobServiceError::JobRetryFailure(job_id, progress.status)) diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 480d31179d..6714be8e49 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -47,6 +47,7 @@ use crate::helpers::builders::TxWithId; use crate::helpers::merkle_tree::BitcoinMerkleTree; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed}; use crate::helpers::{merkle_tree, TransactionKind}; +use crate::job::error::JobServiceError; use crate::job::service::{DaJobService, JobProgress, SentChunks}; use crate::metrics::BITCOIN_DA_METRICS as BM; use crate::monitoring::{MonitoredTxKind, MonitoringConfig, MonitoringService, TxStatus}; @@ -293,10 +294,13 @@ impl BitcoinService { let mut jobs_to_process = Vec::new(); for job_id in active_job_ids { - if let Some(job_data) = self.job_service.get_job(&job_id)? { + if let Some(job) = self.job_service.get_job(&job_id)? { if let Some(progress) = self.job_service.get_progress(&job_id)? { - // get_progress returns LocalJobProgress directly - jobs_to_process.push((job_data, progress)); + // Deserialize RawTxData from job + let raw_data: RawTxData = borsh::from_slice(&job.data) + .map_err(JobServiceError::SerializationError)?; + + jobs_to_process.push((raw_data, progress)); } } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index 840bf8b437..9ce9f418e4 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -50,17 +50,6 @@ impl SentChunks { pub fn new() -> Self { Self::default() } - - /// Return the number of sent chunks - pub fn count(&self) -> usize { - self.reveal_txs.len() - } - - /// Extend with sent commit and reveal chunks - pub fn extend(&mut self, commits: Vec>, reveals: Vec>) { - self.commit_txs.extend(commits); - self.reveal_txs.extend(reveals); - } } /// Tracks progress of a job including sent transactions for recovery. 
@@ -79,18 +68,6 @@ pub struct JobProgress { pub last_updated: u64, } -impl JobProgress { - /// Create a new JobProgress with pending status - pub fn new(job_id: JobId, last_updated: u64) -> Self { - Self { - job_id, - status: JobStatus::Pending, - sent_chunks: SentChunks::new(), - last_updated, - } - } -} - /// DA Job representing a transaction to be sent to the DA layer #[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct Job { From aaeaa616a32d6193c43187bfe21dd341d65b3982 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 13 Oct 2025 14:11:54 +0100 Subject: [PATCH 25/81] Remove job wrapper in RPCs --- bin/citrea/tests/bitcoin/bitcoin_job.rs | 2 +- crates/bitcoin-da/src/job/rpc.rs | 27 +++++++++---------- crates/bitcoin-da/src/job/service.rs | 24 +++++------------ .../db/sov-db/src/schema/types/da_jobs.rs | 2 +- 4 files changed, 22 insertions(+), 33 deletions(-) diff --git a/bin/citrea/tests/bitcoin/bitcoin_job.rs b/bin/citrea/tests/bitcoin/bitcoin_job.rs index 9354f16fda..4c92d1940f 100644 --- a/bin/citrea/tests/bitcoin/bitcoin_job.rs +++ b/bin/citrea/tests/bitcoin/bitcoin_job.rs @@ -143,7 +143,7 @@ impl JobServiceTest { // Cancel job let cancel_job_response = da_service_client.da_job_cancel(job_id).await?; - assert_eq!(cancel_job_response.success, true); + assert!(cancel_job_response.success); let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; assert_eq!(job_by_id.status, JobStatus::Cancelled); diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index dd09d7bd5c..0359c00ed1 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -9,7 +9,6 @@ use citrea_common::rpc::utils::internal_rpc_error; use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use serde::{Deserialize, Serialize}; -use sov_db::schema::types::da_jobs::Job; use super::Result; use crate::job::service::{JobId, JobProgress, JobStatus}; @@ -45,7 +44,7 @@ pub(super) trait DaJobRpcProvider { /// # Returns /// * `Ok(Vec)` - List of jobs matching the filter criteria /// * `Err` on database or serialization errors - fn list_jobs(&self, filter: JobListFilter) -> Result>; + fn list_jobs(&self, filter: JobListFilter) -> Result>; /// Get detailed information about a specific job /// @@ -55,7 +54,7 @@ pub(super) trait DaJobRpcProvider { /// # Returns /// * `Ok(JobInfoResponse)` - Detailed information about the job /// * `Err` on database error - fn get_job_info(&self, job_id: JobId) -> Result<(Job, JobProgress)>; + fn get_job_info(&self, job_id: JobId) -> Result; } /// Filter criteria for listing jobs @@ -160,20 +159,20 @@ pub struct JobInfoResponse { pub error: Option, } -impl JobInfoResponse { - /// Create JobInfoResponse from Job and JobProgress - fn from_job_and_progress((job, progress): (Job, JobProgress)) -> Self { - let error = match &progress.status { +impl From for JobInfoResponse { + fn from(value: JobProgress) -> Self { + let error = match &value.status { JobStatus::Failed { error } => Some(error.clone()), _ => None, }; + let created_at = value.job_id.get_timestamp().map_or(0, |ts| ts.to_unix().0); Self { - job_id: job.id, - status: progress.status.clone(), - created_at: job.created_at, - last_updated: progress.last_updated, - sent_count: progress.sent_chunks.count(), + job_id: value.job_id, + status: value.status.clone(), + created_at, + last_updated: value.last_updated, + sent_count: value.sent_chunks.count(), error, } } @@ -302,7 +301,7 @@ 
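Side note on this change: job ids are uuidv7, so a job's creation time is
recoverable from the id itself, which is what lets the RPC layer drop the
extra `Job` lookup and derive `created_at` directly in `JobInfoResponse::from`
below. A minimal illustrative sketch (assumes the `uuid` crate with the `v7`
feature enabled):

    use uuid::Uuid;

    // v7 uuids embed a unix-millisecond timestamp in their high bits;
    // `get_timestamp()` returns None for uuid versions that lack one.
    fn created_at_secs(job_id: Uuid) -> u64 {
        job_id.get_timestamp().map_or(0, |ts| ts.to_unix().0)
    }

    fn main() {
        let id = Uuid::now_v7();
        println!("job {id} created at unix second {}", created_at_secs(id));
    }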
impl DaJobRpcServer for DaJobRpcServerImpl { .list_jobs(filter) .map_err(internal_rpc_error)? .into_iter() - .map(JobInfoResponse::from_job_and_progress) + .map(Into::into) .collect()) } @@ -311,7 +310,7 @@ impl DaJobRpcServer for DaJobRpcServerImpl { .job_service .get_job_info(job_id) .map_err(internal_rpc_error) - .map(JobInfoResponse::from_job_and_progress) + .map(Into::into) } } diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 7ac07c69d0..f18339ae9a 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -7,13 +7,12 @@ pub use sov_db::schema::types::da_jobs::{Job, JobId, JobStatus}; use sov_db::schema::types::da_jobs::{JobProgress as DbJobProgress, SentChunks as DbSentChunks}; use tracing::{info, instrument}; +use super::Result; use crate::helpers::builders::body_builders::RawTxData; use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; -type Result = std::result::Result; - /// Tracks progress of a job including sent transactions for recovery. /// /// This state is persisted to the database and updated as transactions @@ -391,7 +390,7 @@ impl DaJobRpcProvider for DaJobService { } } - fn list_jobs(&self, filter: JobListFilter) -> Result> { + fn list_jobs(&self, filter: JobListFilter) -> Result> { let limit = filter.limit.unwrap_or(25).min(1000); // Defaults to 25, capped at 1000 let offset = filter.offset.unwrap_or(0); @@ -411,25 +410,16 @@ impl DaJobRpcProvider for DaJobService { // Return (job, progress) per id let mut job_infos = Vec::new(); for job_id in job_ids { - if let (Some(job), Some(progress)) = - (self.get_job(&job_id)?, self.get_progress(&job_id)?) - { - job_infos.push((job, progress)); + if let Some(progress) = self.get_progress(&job_id)? { + job_infos.push(progress); } } Ok(job_infos) } - fn get_job_info(&self, job_id: JobId) -> Result<(Job, JobProgress)> { - let job = self - .get_job(&job_id)? - .ok_or(JobServiceError::JobNotFound(job_id))?; - - let progress = self - .get_progress(&job_id)? - .ok_or(JobServiceError::JobNotFound(job_id))?; - - Ok((job, progress)) + fn get_job_info(&self, job_id: JobId) -> Result { + self.get_progress(&job_id)? 
+ .ok_or(JobServiceError::JobNotFound(job_id)) } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index 9ce9f418e4..574d74aafc 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -6,7 +6,7 @@ use uuid::Uuid; pub type JobId = Uuid; /// Job status representing the current state of transaction processing -#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize, PartialEq)] pub enum JobStatus { /// Job is queued and waiting to be processed Pending, From 1d582a6b99498ab1e6bc6c2adcaeb26a533c796e Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:55:15 +0100 Subject: [PATCH 26/81] Keep writes atomic --- crates/bitcoin-da/src/job/rpc.rs | 33 +++++----- crates/bitcoin-da/src/job/service.rs | 65 +++++-------------- .../full-node/db/sov-db/src/ledger_db/mod.rs | 45 +++++++------ .../db/sov-db/src/ledger_db/traits.rs | 18 ++--- .../full-node/db/sov-db/src/schema/tables.rs | 3 +- .../db/sov-db/src/schema/types/da_jobs.rs | 15 +---- 6 files changed, 67 insertions(+), 112 deletions(-) diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 0359c00ed1..555b626fa6 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -103,38 +103,35 @@ pub enum JobStatusFilter { impl JobStatusFilter { /// Convert filter to list of status codes to query - pub(super) fn to_status_codes(&self) -> Vec { + pub(super) fn to_job_status(&self) -> Vec { match self { - JobStatusFilter::Pending => vec![JobStatus::Pending.as_u8()], - JobStatusFilter::InProgress => vec![JobStatus::InProgress.as_u8()], - JobStatusFilter::Completed => vec![JobStatus::Completed.as_u8()], - JobStatusFilter::Cancelled => vec![JobStatus::Cancelled.as_u8()], + JobStatusFilter::Pending => vec![JobStatus::Pending], + JobStatusFilter::InProgress => vec![JobStatus::InProgress], + JobStatusFilter::Completed => vec![JobStatus::Completed], + JobStatusFilter::Cancelled => vec![JobStatus::Cancelled], JobStatusFilter::Failed => { vec![JobStatus::Failed { error: Default::default(), - } - .as_u8()] + }] } JobStatusFilter::Active => { - vec![JobStatus::Pending.as_u8(), JobStatus::InProgress.as_u8()] + vec![JobStatus::Pending, JobStatus::InProgress] } JobStatusFilter::Terminal => vec![ - JobStatus::Completed.as_u8(), - JobStatus::Cancelled.as_u8(), + JobStatus::Completed, + JobStatus::Cancelled, JobStatus::Failed { error: Default::default(), - } - .as_u8(), + }, ], JobStatusFilter::All => vec![ - JobStatus::Pending.as_u8(), - JobStatus::InProgress.as_u8(), - JobStatus::Completed.as_u8(), - JobStatus::Cancelled.as_u8(), + JobStatus::Pending, + JobStatus::InProgress, + JobStatus::Completed, + JobStatus::Cancelled, JobStatus::Failed { error: Default::default(), - } - .as_u8(), + }, ], } } diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index f18339ae9a..0b103e60fb 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -160,23 +160,12 @@ impl DaJobService { let job = Job::new(job_id, data, created_at); let progress = JobProgress::new(job_id, created_at); - self.insert_job(&job)?; - self.upsert_progress(&progress)?; - self.ledger_db - 
.insert_job_status_index(progress.status.as_u8(), job_id)?; + self.ledger_db.submit_job(&job, &progress.into())?; info!("Job {job_id} submitted and persisted"); Ok(job_id) } - /// Save a new job to db - #[instrument(level = "trace", skip(self))] - fn insert_job(&self, job: &Job) -> Result<()> { - self.ledger_db - .insert_job(job.id, job) - .map_err(JobServiceError::DatabaseError) - } - /// Get a job by id #[instrument(level = "trace", skip(self), ret)] pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { @@ -185,25 +174,13 @@ impl DaJobService { .map_err(JobServiceError::DatabaseError) } - /// Upsert job progress - convert local JobProgress to DB format - #[instrument(level = "trace", skip(self))] - pub(crate) fn upsert_progress(&self, progress: &JobProgress) -> Result<()> { - let db_progress: DbJobProgress = progress.clone().into(); - - self.ledger_db - .upsert_progress(&progress.job_id, &db_progress) - .map_err(JobServiceError::DatabaseError) - } - /// Retrieve job progress by id and convert to local format #[instrument(level = "trace", skip(self), ret)] pub(crate) fn get_progress(&self, job_id: &JobId) -> Result> { - let db_progress = self - .ledger_db + self.ledger_db .get_progress(job_id) - .map_err(JobServiceError::DatabaseError)?; - - Ok(db_progress.map(|p| p.into())) + .map_err(JobServiceError::DatabaseError) + .map(|opt| opt.map(Into::into)) } /// Get all `Pending` and `InProgress` job ids from storage @@ -211,14 +188,11 @@ impl DaJobService { pub(crate) fn get_all_active_job_ids(&self) -> Result> { let mut active_jobs = Vec::new(); - active_jobs.extend( - self.ledger_db - .get_job_ids_by_status(JobStatus::Pending.as_u8())?, - ); + active_jobs.extend(self.ledger_db.get_job_ids_by_status(JobStatus::Pending)?); active_jobs.extend( self.ledger_db - .get_job_ids_by_status(JobStatus::InProgress.as_u8())?, + .get_job_ids_by_status(JobStatus::InProgress)?, ); // Sort uuidv7 chronologically @@ -229,22 +203,19 @@ impl DaJobService { /// Update job status by id #[instrument(level = "debug", skip(self))] - pub fn update_job_status(&self, progress: &mut JobProgress, status: JobStatus) -> Result<()> { - let old_status = progress.status.as_u8(); - let new_status = status.as_u8(); + pub fn update_job_status( + &self, + progress: &mut JobProgress, + new_status: JobStatus, + ) -> Result<()> { + let previous_status = progress.status.clone(); - progress.status = status; + progress.status = new_status; progress.last_updated = get_timestamp(); - self.upsert_progress(progress)?; - - // Update status indexing - if old_status != new_status { - self.ledger_db - .remove_job_status_index(old_status, progress.job_id)?; - self.ledger_db - .insert_job_status_index(new_status, progress.job_id)?; - } + let db_progress = progress.clone().into(); + self.ledger_db + .upsert_progress(&db_progress, previous_status)?; Ok(()) } @@ -336,7 +307,7 @@ impl DaJobService { pub async fn has_job_in_progress(&self) -> Result { let in_progress_jobs = self .ledger_db - .get_job_ids_by_status(JobStatus::InProgress.as_u8())?; + .get_job_ids_by_status(JobStatus::InProgress)?; Ok(!in_progress_jobs.is_empty()) } @@ -398,7 +369,7 @@ impl DaJobRpcProvider for DaJobService { let status_filter = filter.status.unwrap_or_default(); let mut job_ids = Vec::new(); - for code in status_filter.to_status_codes() { + for code in status_filter.to_job_status() { job_ids.extend(self.ledger_db.get_job_ids_by_status(code)?); } job_ids.sort(); // sort chronologically by uuidv7 diff --git 
a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 63fdeedfd0..94fbc75522 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -28,7 +28,7 @@ use crate::schema::tables::{ use crate::schema::types::batch_proof::{ StoredBatchProof, StoredBatchProofOutput, StoredVerifiedProof, }; -use crate::schema::types::da_jobs::{Job, JobProgress}; +use crate::schema::types::da_jobs::{Job, JobProgress, JobStatus as DaJobStatus}; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::{StoredL2Block, StoredTransaction}; use crate::schema::types::light_client_proof::{ @@ -974,9 +974,15 @@ impl ForkMigration for LedgerDB { } impl DaLedgerOps for LedgerDB { - fn insert_job(&self, job_id: Uuid, job: &Job) -> anyhow::Result<()> { + fn submit_job(&self, job: &Job, progress: &JobProgress) -> anyhow::Result<()> { let mut batch = SchemaBatch::new(); + let job_id = job.id; + let status = progress.status.clone(); + batch.put::(&job_id, job)?; + batch.put::(&job_id, progress)?; + batch.put::(&(status, job_id), &())?; + self.db.write_schemas(batch)?; Ok(()) } @@ -985,35 +991,34 @@ impl DaLedgerOps for LedgerDB { self.db.get::(job_id) } - fn upsert_progress(&self, job_id: &Uuid, progress: &JobProgress) -> anyhow::Result<()> { + fn upsert_progress( + &self, + progress: &JobProgress, + previous_status: DaJobStatus, + ) -> anyhow::Result<()> { let mut batch = SchemaBatch::new(); - batch.put::(job_id, progress)?; - self.db.write_schemas(batch)?; - Ok(()) - } - fn get_progress(&self, job_id: &Uuid) -> anyhow::Result> { - self.db.get::(job_id) - } + let job_id = progress.job_id; + + if previous_status != progress.status { + batch.delete::(&(previous_status.clone(), job_id))?; + batch.put::(&(progress.status.clone(), job_id), &())?; + } + + batch.put::(&job_id, progress)?; - fn insert_job_status_index(&self, status: u8, job_id: Uuid) -> anyhow::Result<()> { - let mut batch = SchemaBatch::new(); - batch.put::(&(status, job_id), &())?; self.db.write_schemas(batch)?; Ok(()) } - fn remove_job_status_index(&self, status: u8, job_id: Uuid) -> anyhow::Result<()> { - let mut batch = SchemaBatch::new(); - batch.delete::(&(status, job_id))?; - self.db.write_schemas(batch)?; - Ok(()) + fn get_progress(&self, job_id: &Uuid) -> anyhow::Result> { + self.db.get::(job_id) } - fn get_job_ids_by_status(&self, status: u8) -> anyhow::Result> { + fn get_job_ids_by_status(&self, status: DaJobStatus) -> anyhow::Result> { let mut iter = self.db.iter::()?; - iter.seek(&(status, Uuid::nil()))?; + iter.seek(&(status.clone(), Uuid::nil()))?; let mut job_ids = Vec::new(); for item in iter { diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index 2d57272039..1089317e8a 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -12,7 +12,7 @@ use uuid::Uuid; use crate::schema::tables::{PendingProofs, PendingSequencerCommitments}; use crate::schema::types::batch_proof::{StoredBatchProof, StoredBatchProofOutput}; -use crate::schema::types::da_jobs::{Job, JobProgress}; +use crate::schema::types::da_jobs::{Job, JobProgress, JobStatus as DaJobStatus}; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::StoredL2Block; use 
crate::schema::types::light_client_proof::{ @@ -328,25 +328,19 @@ pub trait SequencerLedgerOps: SharedLedgerOps { /// Bitcoin da ledger operations pub trait DaLedgerOps { - /// DaJobById related methods - /// Insert a DA job by id - fn insert_job(&self, job_id: Uuid, job: &Job) -> Result<()>; + /// Store a job to db + fn submit_job(&self, job: &Job, progress: &JobProgress) -> anyhow::Result<()>; /// Get a DA job by id fn get_job(&self, job_id: &Uuid) -> Result>; - /// DaJobProgressById related methods /// Update a DA job progress by id - fn upsert_progress(&self, job_id: &Uuid, progress: &JobProgress) -> Result<()>; + fn upsert_progress(&self, progress: &JobProgress, previous_status: DaJobStatus) -> Result<()>; + /// Get a DA job progress by id fn get_progress(&self, job_id: &Uuid) -> Result>; - /// DaJobStatusIndex related methods - /// Insert a job status index entry - fn insert_job_status_index(&self, status: u8, job_id: Uuid) -> Result<()>; - /// Remove a job status index entry - fn remove_job_status_index(&self, status: u8, job_id: Uuid) -> Result<()>; /// Get all job ids for a specific status - fn get_job_ids_by_status(&self, status: u8) -> Result>; + fn get_job_ids_by_status(&self, status: DaJobStatus) -> Result>; } /// Test ledger operations diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index e510fc0950..4b671c13c1 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -27,6 +27,7 @@ use super::types::{ AccessoryKey, AccessoryStateValue, BonsaiSession, DbHash, JmtValue, L1Height, L2BlockNumber, L2HeightAndIndex, L2HeightRange, L2HeightStatus, SlotNumber, StateKey, }; +use crate::schema::types::da_jobs::JobStatus; /// A list of all tables used by the StateDB. These tables store rollup state - meaning /// account balances, nonces, etc. @@ -515,7 +516,7 @@ define_table_with_seek_key_codec!( define_table_with_seek_key_codec!( /// Index by (status, jobid) - (DaJobStatusIndex) (u8, Uuid) => () + (DaJobStatusIndex) (JobStatus, Uuid) => () ); #[cfg(test)] diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index 574d74aafc..62c2a5e5f8 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -18,24 +18,11 @@ pub enum JobStatus { Cancelled, /// Job failed with error Failed { - /// Error associated to the failure + /// Error associated with the failure error: String, }, } -impl JobStatus { - /// u8 representation of `JobStatus` - pub fn as_u8(&self) -> u8 { - match self { - JobStatus::Pending => 0, - JobStatus::InProgress => 1, - JobStatus::Completed => 2, - JobStatus::Cancelled => 3, - JobStatus::Failed { .. 
} => 4, - } - } -} - /// Track sent chunk for partial sending and recovery #[derive(Debug, Default, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct SentChunks { From 7dc964254260e35e026256c4476ecc4a64a8e2cb Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 13 Oct 2025 16:20:18 +0100 Subject: [PATCH 27/81] Restore as_u8 --- crates/bitcoin-da/src/job/service.rs | 13 ++++++---- .../full-node/db/sov-db/src/ledger_db/mod.rs | 24 ++++++++----------- .../db/sov-db/src/ledger_db/traits.rs | 6 ++--- .../full-node/db/sov-db/src/schema/tables.rs | 3 +-- .../db/sov-db/src/schema/types/da_jobs.rs | 13 ++++++++++ 5 files changed, 35 insertions(+), 24 deletions(-) diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 0b103e60fb..39b1ebf97c 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -188,11 +188,14 @@ impl DaJobService { pub(crate) fn get_all_active_job_ids(&self) -> Result> { let mut active_jobs = Vec::new(); - active_jobs.extend(self.ledger_db.get_job_ids_by_status(JobStatus::Pending)?); + active_jobs.extend( + self.ledger_db + .get_job_ids_by_status(JobStatus::Pending.as_u8())?, + ); active_jobs.extend( self.ledger_db - .get_job_ids_by_status(JobStatus::InProgress)?, + .get_job_ids_by_status(JobStatus::InProgress.as_u8())?, ); // Sort uuidv7 chronologically @@ -208,7 +211,7 @@ impl DaJobService { progress: &mut JobProgress, new_status: JobStatus, ) -> Result<()> { - let previous_status = progress.status.clone(); + let previous_status = progress.status.as_u8(); progress.status = new_status; progress.last_updated = get_timestamp(); @@ -307,7 +310,7 @@ impl DaJobService { pub async fn has_job_in_progress(&self) -> Result { let in_progress_jobs = self .ledger_db - .get_job_ids_by_status(JobStatus::InProgress)?; + .get_job_ids_by_status(JobStatus::InProgress.as_u8())?; Ok(!in_progress_jobs.is_empty()) } @@ -370,7 +373,7 @@ impl DaJobRpcProvider for DaJobService { let mut job_ids = Vec::new(); for code in status_filter.to_job_status() { - job_ids.extend(self.ledger_db.get_job_ids_by_status(code)?); + job_ids.extend(self.ledger_db.get_job_ids_by_status(code.as_u8())?); } job_ids.sort(); // sort chronologically by uuidv7 diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 94fbc75522..72dc2ae32d 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -28,7 +28,7 @@ use crate::schema::tables::{ use crate::schema::types::batch_proof::{ StoredBatchProof, StoredBatchProofOutput, StoredVerifiedProof, }; -use crate::schema::types::da_jobs::{Job, JobProgress, JobStatus as DaJobStatus}; +use crate::schema::types::da_jobs::{Job, JobProgress}; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::{StoredL2Block, StoredTransaction}; use crate::schema::types::light_client_proof::{ @@ -977,7 +977,7 @@ impl DaLedgerOps for LedgerDB { fn submit_job(&self, job: &Job, progress: &JobProgress) -> anyhow::Result<()> { let mut batch = SchemaBatch::new(); let job_id = job.id; - let status = progress.status.clone(); + let status = progress.status.as_u8(); batch.put::(&job_id, job)?; batch.put::(&job_id, progress)?; @@ -991,21 +991,17 @@ impl DaLedgerOps for LedgerDB { self.db.get::(job_id) } - fn upsert_progress( - &self, - progress: &JobProgress, - 
previous_status: DaJobStatus, - ) -> anyhow::Result<()> { + fn upsert_progress(&self, progress: &JobProgress, previous_status: u8) -> anyhow::Result<()> { let mut batch = SchemaBatch::new(); let job_id = progress.job_id; - - if previous_status != progress.status { - batch.delete::(&(previous_status.clone(), job_id))?; - batch.put::(&(progress.status.clone(), job_id), &())?; - } + let new_status = progress.status.as_u8(); batch.put::(&job_id, progress)?; + if previous_status != new_status { + batch.delete::(&(previous_status, job_id))?; + batch.put::(&(new_status, job_id), &())?; + } self.db.write_schemas(batch)?; Ok(()) @@ -1015,10 +1011,10 @@ impl DaLedgerOps for LedgerDB { self.db.get::(job_id) } - fn get_job_ids_by_status(&self, status: DaJobStatus) -> anyhow::Result> { + fn get_job_ids_by_status(&self, status: u8) -> anyhow::Result> { let mut iter = self.db.iter::()?; - iter.seek(&(status.clone(), Uuid::nil()))?; + iter.seek(&(status, Uuid::nil()))?; let mut job_ids = Vec::new(); for item in iter { diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index 1089317e8a..79e818c956 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -12,7 +12,7 @@ use uuid::Uuid; use crate::schema::tables::{PendingProofs, PendingSequencerCommitments}; use crate::schema::types::batch_proof::{StoredBatchProof, StoredBatchProofOutput}; -use crate::schema::types::da_jobs::{Job, JobProgress, JobStatus as DaJobStatus}; +use crate::schema::types::da_jobs::{Job, JobProgress}; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::StoredL2Block; use crate::schema::types::light_client_proof::{ @@ -334,13 +334,13 @@ pub trait DaLedgerOps { fn get_job(&self, job_id: &Uuid) -> Result>; /// Update a DA job progress by id - fn upsert_progress(&self, progress: &JobProgress, previous_status: DaJobStatus) -> Result<()>; + fn upsert_progress(&self, progress: &JobProgress, previous_status: u8) -> Result<()>; /// Get a DA job progress by id fn get_progress(&self, job_id: &Uuid) -> Result>; /// Get all job ids for a specific status - fn get_job_ids_by_status(&self, status: DaJobStatus) -> Result>; + fn get_job_ids_by_status(&self, status: u8) -> Result>; } /// Test ledger operations diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index 4b671c13c1..e510fc0950 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -27,7 +27,6 @@ use super::types::{ AccessoryKey, AccessoryStateValue, BonsaiSession, DbHash, JmtValue, L1Height, L2BlockNumber, L2HeightAndIndex, L2HeightRange, L2HeightStatus, SlotNumber, StateKey, }; -use crate::schema::types::da_jobs::JobStatus; /// A list of all tables used by the StateDB. These tables store rollup state - meaning /// account balances, nonces, etc. 
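A hedged note on this revert: the commit message gives no rationale, but a plausible one is that a borsh-encoded `JobStatus` makes a poor seek key now that `Failed { error }` carries a variable-length string, since every distinct error produces a distinct key encoding and a single seek could not cleanly enumerate all failed jobs; a fixed one-byte discriminant keeps each status in one contiguous key range. The scan pattern being preserved looks roughly like this, modelled here with a `BTreeMap` instead of the sov-db iterator:

```rust
// Sketch of the (status, job_id) range scan behind `get_job_ids_by_status`;
// the BTreeMap stands in for the RocksDB-backed index, not the sov-db API.
use std::collections::BTreeMap;
use uuid::Uuid;

fn job_ids_by_status(index: &BTreeMap<(u8, Uuid), ()>, status: u8) -> Vec<Uuid> {
    index
        .range((status, Uuid::nil())..)         // seek to (status, nil)
        .take_while(|((s, _), _)| *s == status) // stop at the next status
        .map(|((_, id), _)| *id)
        .collect()
}
```

Note that the surrounding hunks keep the index maintenance inside the same `SchemaBatch` as the progress row, so the atomicity introduced in the previous commit survives the key-type change.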
@@ -516,7 +515,7 @@ define_table_with_seek_key_codec!( define_table_with_seek_key_codec!( /// Index by (status, jobid) - (DaJobStatusIndex) (JobStatus, Uuid) => () + (DaJobStatusIndex) (u8, Uuid) => () ); #[cfg(test)] diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index 62c2a5e5f8..b393c71ffa 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -23,6 +23,19 @@ pub enum JobStatus { }, } +impl JobStatus { + /// u8 representation of `JobStatus` + pub fn as_u8(&self) -> u8 { + match self { + JobStatus::Pending => 0, + JobStatus::InProgress => 1, + JobStatus::Completed => 2, + JobStatus::Cancelled => 3, + JobStatus::Failed { .. } => 4, + } + } +} + /// Track sent chunk for partial sending and recovery #[derive(Debug, Default, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct SentChunks { From 580e6bce1b626b5e02768d0159e2befff9656e00 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 14 Oct 2025 12:16:55 +0100 Subject: [PATCH 28/81] Prevent concurrent access to job service --- crates/bitcoin-da/src/job/rpc.rs | 8 + crates/bitcoin-da/src/job/service.rs | 82 +++----- crates/bitcoin-da/src/service.rs | 179 +++++++++++------- crates/bitcoin-da/src/test_utils.rs | 5 +- .../db/sov-db/src/schema/types/da_jobs.rs | 2 + 5 files changed, 146 insertions(+), 130 deletions(-) diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 555b626fa6..ec7565adeb 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -264,6 +264,8 @@ impl DaJobRpcServer for DaJobRpcServerImpl { async fn da_job_cancel(&self, job_id: JobId) -> RpcResult { self.da .job_service + .lock() + .await .cancel_job(job_id) .map(|_| CancelJobResponse { success: true }) .map_err(internal_rpc_error) @@ -272,6 +274,8 @@ impl DaJobRpcServer for DaJobRpcServerImpl { async fn da_job_retry(&self, job_id: JobId) -> RpcResult { self.da .job_service + .lock() + .await .retry_job(job_id) .map(|new_job_id| RetryJobResponse { new_job_id, @@ -295,6 +299,8 @@ impl DaJobRpcServer for DaJobRpcServerImpl { Ok(self .da .job_service + .lock() + .await .list_jobs(filter) .map_err(internal_rpc_error)? 
.into_iter() @@ -305,6 +311,8 @@ impl DaJobRpcServer for DaJobRpcServerImpl { async fn da_job_get_info(&self, job_id: JobId) -> RpcResult { self.da .job_service + .lock() + .await .get_job_info(job_id) .map_err(internal_rpc_error) .map(Into::into) diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 39b1ebf97c..3889282307 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -1,5 +1,6 @@ -use std::time::{Duration, Instant}; +use std::collections::HashSet; +use bitcoin::hashes::Hash; use bitcoin::{Transaction, Txid}; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::DaLedgerOps; @@ -47,6 +48,8 @@ pub struct SentChunks { pub commit_txs: Vec, /// Sent reveal txs pub reveal_txs: Vec, + /// All sent txids + pub txids: HashSet, } impl SentChunks { @@ -61,9 +64,15 @@ impl SentChunks { } /// Extend with sent commit and reveal chunks - pub fn extend(&mut self, commits: Vec, reveals: Vec) { + pub fn extend( + &mut self, + commits: Vec, + reveals: Vec, + txids: Vec, + ) { self.commit_txs.extend(commits); self.reveal_txs.extend(reveals); + self.txids.extend(txids); } } @@ -87,9 +96,16 @@ impl From for SentChunks { }) .collect(); + let txids = db_chunks + .txids + .into_iter() + .map(|tx| Txid::from_byte_array(tx)) + .collect(); + Self { commit_txs, reveal_txs, + txids, } } } @@ -108,9 +124,16 @@ impl From for DbSentChunks { .map(bitcoin::consensus::serialize) .collect(); + let txids = chunks + .txids + .into_iter() + .map(|tx| tx.to_byte_array()) + .collect(); + Self { commit_txs, reveal_txs, + txids, } } } @@ -223,24 +246,12 @@ impl DaJobService { Ok(()) } - /// Record sending DA transactions and keep track of sent chunks and reveals - #[instrument(level = "debug", skip(self))] - pub fn record_sent_transactions( - &self, - progress: &mut JobProgress, - commits: Vec, - reveals: Vec, - ) -> Result<()> { - progress.sent_chunks.extend(commits, reveals); - self.update_job_status(progress, JobStatus::InProgress) - } - /// Get all pending commit and reveals txids. /// /// This is required for removing from the utxo set and prevent selecting UTXOs twice #[instrument(level = "trace", skip_all, ret)] - pub(crate) fn get_pending_chunks(&self) -> Result> { - let mut txids = Vec::new(); + pub(crate) fn get_pending_chunks(&self) -> Result> { + let mut txids = HashSet::new(); let active_job_ids = self.get_all_active_job_ids()?; for job_id in active_job_ids { @@ -267,45 +278,6 @@ impl DaJobService { Ok(txids) } - /// Wait for job completion and return the transaction ID - #[instrument(level = "debug", skip(self, timeout), ret)] - pub async fn wait_for_completion( - &self, - job_id: JobId, - timeout: Option, - ) -> Result { - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(600)); // Defaults to 10min - - loop { - if start.elapsed() > timeout { - return Err(JobServiceError::JobTimeout(job_id, timeout.as_secs())); - } - - let progress = self - .get_progress(&job_id)? - .ok_or(JobServiceError::JobNotFound(job_id))?; - - match progress.status { - JobStatus::Completed => { - if let Some(last_reveal) = progress.sent_chunks.reveal_txs.last() { - return Ok(last_reveal.compute_txid()); - } - return Err(JobServiceError::NoTransactionsFound(job_id)); - } - JobStatus::Failed { error, .. 
} => { - return Err(JobServiceError::JobFailed(job_id, error)); - } - JobStatus::Cancelled => { - return Err(JobServiceError::JobCancelled(job_id)); - } - _ => { - tokio::time::sleep(Duration::from_millis(100)).await; - } - } - } - } - /// Check if any job is in progress. pub async fn has_job_in_progress(&self) -> Result { let in_progress_jobs = self diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 6714be8e49..20ec1966ee 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -6,10 +6,11 @@ use core::result::Result::Ok; use core::str::FromStr; use core::time::Duration; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; +use std::time::Instant; use anyhow::anyhow; use async_trait::async_trait; @@ -171,7 +172,7 @@ pub struct BitcoinService { utxo_selection_mode: UtxoSelectionMode, // Persistent job queue - pub(crate) job_service: DaJobService, + pub(crate) job_service: Mutex>, } impl BitcoinService { @@ -186,7 +187,7 @@ impl BitcoinService { reveal_tx_prefix: Vec, tx_backup_dir: PathBuf, utxo_selection_mode: UtxoSelectionMode, - job_service: DaJobService, + job_service: Mutex>, ) -> Self { Self { tx_signer: TxSigner::new(client.clone()), @@ -244,7 +245,7 @@ impl BitcoinService { let utxo_selection_mode = config.utxo_selection_mode.clone().unwrap_or_default(); - let job_service = DaJobService::new(ledger_db); + let job_service = Mutex::new(DaJobService::new(ledger_db)); Ok(Self::new( client, network, @@ -289,38 +290,48 @@ impl BitcoinService { // Process job queue async fn process_job_service(&self) -> Result<()> { + let job_service = self.job_service.lock().await; + // Get all pending/in-progress jobs - let active_job_ids = self.job_service.get_all_active_job_ids()?; - let mut jobs_to_process = Vec::new(); + let active_job_ids = job_service.get_all_active_job_ids()?; + let mut has_job_in_progress = false; + let mut sent_txids = job_service.get_pending_chunks()?; for job_id in active_job_ids { - if let Some(job) = self.job_service.get_job(&job_id)? { - if let Some(progress) = self.job_service.get_progress(&job_id)? { - // Deserialize RawTxData from job - let raw_data: RawTxData = borsh::from_slice(&job.data) - .map_err(JobServiceError::SerializationError)?; + info!("Processing job {}", job_id); - jobs_to_process.push((raw_data, progress)); - } - } - } + let job = job_service + .get_job(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + let progress = &mut job_service + .get_progress(&job_id)? 
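Design note on the new `Mutex` wrapper (hedged, since the diff does not say): it has to be tokio's async `Mutex` rather than `std::sync::Mutex`, because the RPC handlers and `process_job_service` hold the guard across `.await` points, and a `std` guard is not `Send`, so a future holding one across an await cannot run on the multi-threaded runtime. A minimal illustration:

```rust
// Holding a lock across an await requires an async mutex.
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;

async fn locked_section(jobs: Arc<Mutex<Vec<&'static str>>>) {
    let mut guard = jobs.lock().await; // asynchronous acquisition
    guard.push("submitted");
    tokio::time::sleep(Duration::from_millis(10)).await; // guard held across await
    guard.push("processed");
}
```

The trade-off is that every job RPC now serializes on the job service, which is presumably the point of the commit: one writer at a time.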
+ .ok_or(JobServiceError::JobNotFound(job_id))?; - for (job_data, mut progress) in jobs_to_process { - let job_id = progress.job_id; - info!("Processing job {}", job_id); + // Deserialize RawTxData from job + let job_data: RawTxData = + borsh::from_slice(&job.data).map_err(JobServiceError::SerializationError)?; + + has_job_in_progress = + has_job_in_progress || matches!(progress.status, JobStatus::InProgress); - match self.process_job(&job_data, &mut progress).await { + match self + .process_job(job_data, progress, has_job_in_progress, &sent_txids) + .await + { Ok(completed) => { if completed { + job_service.update_job_status(progress, JobStatus::Completed)?; info!("Job {} completed successfully", job_id); } else { + job_service.update_job_status(progress, JobStatus::InProgress)?; + sent_txids.extend(&progress.sent_chunks.txids); info!("Job {} partially sent", job_id); } } Err(e) => { error!("Error processing job {}: {:?}", job_id, e); - self.job_service.update_job_status( - &mut progress, + job_service.update_job_status( + progress, JobStatus::Failed { error: e.to_string(), }, @@ -332,18 +343,24 @@ impl BitcoinService { Ok(()) } - async fn process_job(&self, job_data: &RawTxData, progress: &mut JobProgress) -> Result { + async fn process_job( + &self, + job_data: RawTxData, + progress: &mut JobProgress, + has_job_in_progress: bool, + sent_txids: &HashSet, + ) -> Result { info!( "Processing job {} with status {:?}", progress.job_id, progress.status ); // get all available utxos - let utxos = self.get_utxos().await?; + let utxos = self.get_utxos(sent_txids).await?; let prev_utxo = match &progress.status { JobStatus::InProgress => None, // Will use previous reveal utxo in create_inscription_type_1 - _ => self.select_prev_utxo(&utxos).await?, + _ => self.select_prev_utxo(&utxos, has_job_in_progress).await?, }; // Get current fee rate as sat/vb @@ -354,7 +371,7 @@ impl BitcoinService { fee_sat_per_vbyte, utxos.clone(), prev_utxo.clone(), - job_data.clone(), + job_data, progress.sent_chunks.clone(), ) .await?; @@ -382,8 +399,6 @@ impl BitcoinService { backup_txs_to_file(&self.tx_backup_dir, &signed_txs)?; let mut txids = Vec::new(); - let mut commits_sent = Vec::new(); - let mut reveals_sent = Vec::new(); let mut sent_count = 0; for signed_tx in &signed_txs { @@ -396,15 +411,13 @@ impl BitcoinService { match self.send_signed_transaction(signed_tx).await { Ok(ids) => { sent_count += 1; - txids.extend(ids); - commits_sent.push(signed_tx.commit.tx.clone()); - reveals_sent.push(signed_tx.reveal.tx.clone()); + txids.extend(&ids); - self.job_service.record_sent_transactions( - progress, + progress.sent_chunks.extend( vec![signed_tx.commit.tx.clone()], vec![signed_tx.reveal.tx.clone()], - )?; + ids, + ); let txs = signed_tx.clone().into_txs_with_id(); self.monitoring.monitor_transaction_chain(vec![txs]).await?; @@ -424,31 +437,16 @@ impl BitcoinService { let total_sent = current_idx + sent_count; let completed = total_sent >= total_needed; - if completed { - // Mark job as completed - self.job_service - .update_job_status(progress, JobStatus::Completed)?; - - info!("Job {} marked as completed", progress.job_id); - } else if sent_count > 0 { - // Job partially sent - self.job_service - .update_job_status(progress, JobStatus::InProgress)?; - - info!( - "Job {} progress recorded: {}/{} transactions sent", - progress.job_id, total_sent, total_needed - ); - } - Ok(completed) } - async fn select_prev_utxo(&self, utxos: &[UTXO]) -> Result> { + async fn select_prev_utxo( + &self, + utxos: &[UTXO], + 
has_job_in_progress: bool, + ) -> Result> { let prev_utxo = self.get_prev_utxo().await; - let job_in_progress = self.job_service.has_job_in_progress().await?; - - if !job_in_progress { + if !has_job_in_progress { return Ok(prev_utxo); } @@ -458,7 +456,13 @@ impl BitcoinService { Err(BitcoinServiceError::PreviousJobInProgress) } // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. - UtxoSelectionMode::Oldest => self.get_highest_confirmation_utxo(utxos.to_vec()).await, + UtxoSelectionMode::Oldest => Ok(if prev_utxo.is_some() { + // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain + prev_utxo + } else { + // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. + self.get_highest_confirmation_utxo(utxos.to_vec()).await? + }), } } @@ -478,7 +482,7 @@ impl BitcoinService { } #[instrument(level = "trace", skip_all, ret)] - pub(crate) async fn get_utxos(&self) -> Result> { + pub(crate) async fn get_utxos(&self, sent_txids: &HashSet) -> Result> { let utxos = self .client .list_unspent(Some(0), None, None, None, None) @@ -504,15 +508,13 @@ impl BitcoinService { // To make sure there are no conflicts between parallel utxos chain, // this additional filters out any UTXO used by queued txs and any change UTXO that are not finalized UtxoSelectionMode::Oldest => { - let txids = self.job_service.get_pending_chunks()?; - utxos.into_iter().filter(|utxo| { utxo.spendable && utxo.solvable && utxo.safe && utxo.amount > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) // Remove utxo already in use by queued txs - && !txids.contains(&utxo.txid) + && !sent_txids.contains(&utxo.txid) // Only keep finalized change output && (utxo.vout == 0 || utxo.confirmations as u64 >= self.network_constants.finality_depth) }) @@ -1259,32 +1261,63 @@ impl DaService for BitcoinService { /// Submit a new job to the queue async fn send_transaction(&self, tx_request: DaTxRequest) -> Result { - // TODO handle chaining job request - if self.utxo_selection_mode == UtxoSelectionMode::Chained { - let active_jobs = self.job_service.get_all_active_job_ids()?; - - if !active_jobs.is_empty() { - return Err(BitcoinServiceError::PreviousJobInProgress); + let job_id = { + let job_service = self.job_service.lock().await; + + // TODO handle chaining job request + if self.utxo_selection_mode == UtxoSelectionMode::Chained { + let active_jobs = job_service.get_all_active_job_ids()?; + if !active_jobs.is_empty() { + return Err(BitcoinServiceError::PreviousJobInProgress); + } } - } - - let job_id = self.job_service.submit_job(tx_request.try_into()?)?; + job_service.submit_job(tx_request.try_into()?)? + }; self.process_job_service().await?; Ok(job_id) } + /// Wait for job completion by job_id and returns the txid async fn wait_for_completion( &self, job_id: JobId, timeout: Option, ) -> Result { - Ok(self - .job_service - .wait_for_completion(job_id, timeout) - .await - .map(TxidWrapper)?) + let start = Instant::now(); + let timeout = timeout.unwrap_or(Duration::from_secs(600)); // Defaults to 10min + + loop { + if start.elapsed() > timeout { + return Err(JobServiceError::JobTimeout(job_id, timeout.as_secs()).into()); + } + + let progress = self + .job_service + .lock() + .await + .get_progress(&job_id)? 
+ .ok_or(JobServiceError::JobNotFound(job_id))?; + + match progress.status { + JobStatus::Completed => { + if let Some(last_reveal) = progress.sent_chunks.reveal_txs.last() { + return Ok(TxidWrapper(last_reveal.compute_txid())); + } + return Err(JobServiceError::NoTransactionsFound(job_id).into()); + } + JobStatus::Failed { error, .. } => { + return Err(JobServiceError::JobFailed(job_id, error).into()); + } + JobStatus::Cancelled => { + return Err(JobServiceError::JobCancelled(job_id).into()); + } + _ => { + tokio::time::sleep(Duration::from_millis(500)).await; + } + } + } } #[instrument(level = "trace", skip(self))] diff --git a/crates/bitcoin-da/src/test_utils.rs b/crates/bitcoin-da/src/test_utils.rs index 4344e8d4a2..e71c314eb0 100644 --- a/crates/bitcoin-da/src/test_utils.rs +++ b/crates/bitcoin-da/src/test_utils.rs @@ -21,6 +21,7 @@ impl BitcoinService { let da_private_key = self.da_private_key.expect("No private key set"); + let sent_txids = Default::default(); match tx_request { DaTxRequest::ZKProof(zkproof) => { let mut txids = vec![]; @@ -33,7 +34,7 @@ impl BitcoinService { RawTxData::Chunks(chunks) => { for body in chunks { // get all available utxos that are not already spent - let utxos = self.get_utxos().await?; + let utxos = self.get_utxos(&sent_txids).await?; let utxos = utxos .into_iter() .filter(|utxo| { @@ -92,7 +93,7 @@ impl BitcoinService { borsh::to_vec(&aggregate).expect("Aggregate serialize must not fail"); // get all available utxos that are not already spent - let utxos = self.get_utxos().await?; + let utxos = self.get_utxos(&sent_txids).await?; let utxos = utxos .into_iter() .filter(|utxo| utxo.amount >= 50 * 10_u64.pow(8)) diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index b393c71ffa..c86cc8d9bc 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -43,6 +43,8 @@ pub struct SentChunks { pub commit_txs: Vec>, /// Sent reveal txs (serialized bitcoin::Transaction) pub reveal_txs: Vec>, + /// Sent txids + pub txids: Vec<[u8; 32]>, } impl SentChunks { From 0c3b30cc1105b1b9cd56035666797a4e61d8427d Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 14 Oct 2025 12:42:23 +0100 Subject: [PATCH 29/81] Cleanup sent txids tracking --- crates/bitcoin-da/src/job/service.rs | 24 +++++++----------------- crates/bitcoin-da/src/service.rs | 18 ++++++------------ 2 files changed, 13 insertions(+), 29 deletions(-) diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 3889282307..1809fb8346 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -255,23 +255,13 @@ impl DaJobService { let active_job_ids = self.get_all_active_job_ids()?; for job_id in active_job_ids { - if let Some(progress) = self.get_progress(&job_id)? { - if matches!(progress.status, JobStatus::InProgress) { - txids.extend( - progress - .sent_chunks - .commit_txs - .iter() - .map(|tx| tx.compute_txid()), - ); - txids.extend( - progress - .sent_chunks - .reveal_txs - .iter() - .map(|tx| tx.compute_txid()), - ); - } + if let Some(JobProgress { + status: JobStatus::InProgress, + sent_chunks, + .. + }) = self.get_progress(&job_id)? 
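`wait_for_completion` keeps the same shape after moving into the `DaService` impl: poll the persisted progress on an interval until a terminal status, bounded by a deadline. The generic pattern, with placeholder types and assuming tokio:

```rust
// Poll-until-terminal skeleton in the spirit of `wait_for_completion`;
// `Status` and the closure are stand-ins for the job-service types.
use std::time::{Duration, Instant};

enum Status<T> {
    Done(T),
    Failed(String),
    Running,
}

async fn poll_until_done<T>(
    timeout: Duration,
    mut check: impl FnMut() -> Status<T>,
) -> Result<T, String> {
    let start = Instant::now();
    loop {
        if start.elapsed() > timeout {
            return Err("timed out".into());
        }
        match check() {
            Status::Done(v) => return Ok(v),
            Status::Failed(e) => return Err(e),
            Status::Running => tokio::time::sleep(Duration::from_millis(500)).await,
        }
    }
}
```

Note the poll interval also widened from 100 ms to 500 ms in this move, which matters now that each poll takes the job-service lock.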
+ { + txids.extend(sent_chunks.txids); } } diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 20ec1966ee..112493fc7b 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -294,9 +294,6 @@ impl BitcoinService { // Get all pending/in-progress jobs let active_job_ids = job_service.get_all_active_job_ids()?; - let mut has_job_in_progress = false; - let mut sent_txids = job_service.get_pending_chunks()?; - for job_id in active_job_ids { info!("Processing job {}", job_id); @@ -311,20 +308,15 @@ impl BitcoinService { let job_data: RawTxData = borsh::from_slice(&job.data).map_err(JobServiceError::SerializationError)?; - has_job_in_progress = - has_job_in_progress || matches!(progress.status, JobStatus::InProgress); + let sent_txids = job_service.get_pending_chunks()?; - match self - .process_job(job_data, progress, has_job_in_progress, &sent_txids) - .await - { + match self.process_job(job_data, progress, &sent_txids).await { Ok(completed) => { if completed { job_service.update_job_status(progress, JobStatus::Completed)?; info!("Job {} completed successfully", job_id); } else { job_service.update_job_status(progress, JobStatus::InProgress)?; - sent_txids.extend(&progress.sent_chunks.txids); info!("Job {} partially sent", job_id); } } @@ -347,7 +339,6 @@ impl BitcoinService { &self, job_data: RawTxData, progress: &mut JobProgress, - has_job_in_progress: bool, sent_txids: &HashSet, ) -> Result { info!( @@ -360,7 +351,10 @@ impl BitcoinService { let prev_utxo = match &progress.status { JobStatus::InProgress => None, // Will use previous reveal utxo in create_inscription_type_1 - _ => self.select_prev_utxo(&utxos, has_job_in_progress).await?, + _ => { + self.select_prev_utxo(&utxos, !sent_txids.is_empty()) + .await? 
+ } }; // Get current fee rate as sat/vb From ac4137e9b78043793f99ab0abad41b95c31cffe0 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 14 Oct 2025 13:50:27 +0100 Subject: [PATCH 30/81] Fix ledgerdb re-use between services --- bin/citrea/tests/bitcoin/utils.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index 0c9a586b71..fa88a8cb84 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -509,9 +509,13 @@ pub async fn generate_mock_txs( let prefix_str = "wrong_prefix"; let wrong_prefix_wallet = PathBuf::from_str(prefix_str).unwrap(); create_and_fund_wallet(prefix_str.to_string(), da_node).await; + + let mut first_config = da_node.config.clone(); + first_config.data_dir = first_config.data_dir.join("1"); + let wrong_prefix_da_service = spawn_bitcoin_da_service( task_executor, - &da_node.config, + &first_config, wrong_prefix_wallet, DaServiceKeyKind::Sequencer, vec![6], @@ -524,9 +528,12 @@ pub async fn generate_mock_txs( let wrong_key_wallet = PathBuf::from_str(wrong_key_str).unwrap(); create_and_fund_wallet(wrong_key_str.to_string(), da_node).await; + let mut second_config = da_node.config.clone(); + second_config.data_dir = second_config.data_dir.join("2"); + let wrong_key_da_service = spawn_bitcoin_da_service( task_executor, - &da_node.config, + &second_config, wrong_key_wallet, DaServiceKeyKind::Other( "E9873D79C6D87DC0FB6A5778633389F4453213303DA61F20BD67FC233AA33263".to_string(), From c3475d2cb49a22fe68b025dddcca5db47c826998 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 14 Oct 2025 14:29:46 +0100 Subject: [PATCH 31/81] Dedup ledgerdb test config --- bin/citrea/tests/bitcoin/utils.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index fa88a8cb84..6cbfd34f49 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -121,9 +121,12 @@ pub async fn spawn_bitcoin_da_sequencer_service( config: &BitcoinConfig, dir: PathBuf, ) -> Arc { + let mut sequencer_config = config.clone(); + sequencer_config.data_dir = sequencer_config.data_dir.join("sequencer"); + spawn_bitcoin_da_service( task_executor, - config, + &sequencer_config, dir, DaServiceKeyKind::Sequencer, REVEAL_TX_PREFIX.to_vec(), @@ -138,9 +141,12 @@ pub async fn spawn_bitcoin_da_prover_service( config: &BitcoinConfig, dir: PathBuf, ) -> Arc { + let mut prover_config = config.clone(); + prover_config.data_dir = prover_config.data_dir.join("prover"); + spawn_bitcoin_da_service( task_executor, - config, + &prover_config, dir, DaServiceKeyKind::BatchProver, REVEAL_TX_PREFIX.to_vec(), From a7eb889811f836f06f60a9f8a8a44d53d9ff486f Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 14 Oct 2025 21:47:08 +0100 Subject: [PATCH 32/81] Fix utxo selection --- bin/citrea/tests/bitcoin/da_queue.rs | 6 +++--- crates/bitcoin-da/src/service.rs | 32 +++++++++++++++++----------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 710e6d5e15..26c4311f72 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -448,12 +448,12 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { assert!(res.is_ok()); - let monitored_txs = 
da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 34); - // Txs starting from a new chain should be accepted to mempool da.wait_mempool_len(8 * 3 + 2 + 8, None).await?; + let monitored_txs = da_service.monitoring.get_monitored_txs().await; + assert_eq!(monitored_txs.len(), 34); + // We mine the first three proofs + the 1 chunk pair + the extra proof starting another UTXO chain // and make sure that the remaining chunks and aggregate and sent on next block when mempool size is freed // Assert that all chunks were mined and mempool space is freed diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 112493fc7b..e5310190a3 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -292,6 +292,11 @@ impl BitcoinService { async fn process_job_service(&self) -> Result<()> { let job_service = self.job_service.lock().await; + // Optimization for utxo selection. + // If previous job ends in progress, we need to select a new utxo in Oldest mode. + // If the subsequent job completes, we can continue chaining from its outputs. + let mut previous_job_in_progress = false; + // Get all pending/in-progress jobs let active_job_ids = job_service.get_all_active_job_ids()?; for job_id in active_job_ids { @@ -310,14 +315,21 @@ impl BitcoinService { let sent_txids = job_service.get_pending_chunks()?; - match self.process_job(job_data, progress, &sent_txids).await { + match self + .process_job(job_data, progress, &sent_txids, previous_job_in_progress) + .await + { Ok(completed) => { if completed { job_service.update_job_status(progress, JobStatus::Completed)?; info!("Job {} completed successfully", job_id); + + previous_job_in_progress = false; } else { job_service.update_job_status(progress, JobStatus::InProgress)?; info!("Job {} partially sent", job_id); + + previous_job_in_progress = true; } } Err(e) => { @@ -340,6 +352,7 @@ impl BitcoinService { job_data: RawTxData, progress: &mut JobProgress, sent_txids: &HashSet, + previous_job_in_progress: bool, ) -> Result { info!( "Processing job {} with status {:?}", @@ -352,7 +365,7 @@ impl BitcoinService { let prev_utxo = match &progress.status { JobStatus::InProgress => None, // Will use previous reveal utxo in create_inscription_type_1 _ => { - self.select_prev_utxo(&utxos, !sent_txids.is_empty()) + self.select_prev_utxo(&utxos, previous_job_in_progress) .await? } }; @@ -437,10 +450,10 @@ impl BitcoinService { async fn select_prev_utxo( &self, utxos: &[UTXO], - has_job_in_progress: bool, + previous_job_in_progress: bool, ) -> Result> { let prev_utxo = self.get_prev_utxo().await; - if !has_job_in_progress { + if !previous_job_in_progress { return Ok(prev_utxo); } @@ -449,14 +462,9 @@ impl BitcoinService { // Prevent UTXO conflicts when queue is not empty and running UtxoSelectionMode::Chained mode Err(BitcoinServiceError::PreviousJobInProgress) } - // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. - UtxoSelectionMode::Oldest => Ok(if prev_utxo.is_some() { - // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain - prev_utxo - } else { - // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. - self.get_highest_confirmation_utxo(utxos.to_vec()).await? - }), + UtxoSelectionMode::Oldest => { + Ok(self.get_highest_confirmation_utxo(utxos.to_vec()).await?) 
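The fixed `select_prev_utxo` reduces to a small decision table: chain from the last reveal output when the previous job finished, otherwise either refuse (Chained) or fall back to the most-confirmed UTXO (Oldest). A compact restatement with illustrative types:

```rust
// Reduction of the post-fix policy; `Utxo` and `Mode` are stand-ins,
// not the bitcoin-da types.
struct Utxo;

#[derive(PartialEq)]
enum Mode {
    Chained,
    Oldest,
}

fn pick_prev_utxo(
    prev: Option<Utxo>,           // change output of the last reveal tx
    most_confirmed: Option<Utxo>, // deepest-confirmed spendable UTXO
    mode: Mode,
    previous_job_in_progress: bool,
) -> Result<Option<Utxo>, &'static str> {
    if !previous_job_in_progress {
        return Ok(prev); // safe to keep extending the existing chain
    }
    match mode {
        Mode::Chained => Err("previous job still in progress"), // no parallel chains
        Mode::Oldest => Ok(most_confirmed), // start a fresh chain from settled funds
    }
}
```

The earlier `prev_utxo.is_some()` shortcut is gone: when the previous job is still in flight, Oldest mode now always starts from a settled UTXO rather than risk chaining onto a queued transaction.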
+ } } } From d7c06ed4276e579c04778ab30351b88a09670b47 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 14 Oct 2025 23:37:01 +0100 Subject: [PATCH 33/81] Fix lint --- bin/cli/src/main.rs | 4 +++- crates/bitcoin-da/src/job/service.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index f3a0d66325..263b56912e 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -109,7 +109,9 @@ async fn main() -> anyhow::Result<()> { } => { if l2_target.is_none() && l1_target.is_none() && sequencer_commitment_index.is_none() { // Invalid CLI usage: at least one rollback target must be provided - return Err(anyhow::anyhow!("Missing L2/L1 target or sequencer commitment")); + return Err(anyhow::anyhow!( + "Missing L2/L1 target or sequencer commitment" + )); } commands::rollback( node_type, diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 1809fb8346..b13aa2f537 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -99,7 +99,7 @@ impl From for SentChunks { let txids = db_chunks .txids .into_iter() - .map(|tx| Txid::from_byte_array(tx)) + .map(Txid::from_byte_array) .collect(); Self { From 1955c2b760cb1953b4065273cb039ea3943786d6 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 14 Oct 2025 23:37:31 +0100 Subject: [PATCH 34/81] Protect behind API_KEY --- crates/common/src/rpc/auth.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/common/src/rpc/auth.rs b/crates/common/src/rpc/auth.rs index 9f75efc077..83361371f7 100644 --- a/crates/common/src/rpc/auth.rs +++ b/crates/common/src/rpc/auth.rs @@ -8,7 +8,13 @@ use jsonrpsee::MethodResponse; use serde_json::value::RawValue; use serde_json::Value; -const PROTECTED_METHODS: [&str; 3] = ["backup_create", "backup_validate", "backup_info"]; +const PROTECTED_METHODS: [&str; 5] = [ + "backup_create", + "backup_validate", + "backup_info", + "da_job_cancel", + "da_job_retry", +]; #[derive(Debug, Clone)] pub struct Auth { From 1d345e8a0e8c69d492f601808632d0dc3ccb71a8 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:10:31 +0100 Subject: [PATCH 35/81] Fee cap --- crates/bitcoin-da/src/error.rs | 12 +++++++ crates/bitcoin-da/src/service.rs | 57 +++++++++++++++++++++++++++++--- 2 files changed, 65 insertions(+), 4 deletions(-) diff --git a/crates/bitcoin-da/src/error.rs b/crates/bitcoin-da/src/error.rs index 64ab50306b..bf3d1db63e 100644 --- a/crates/bitcoin-da/src/error.rs +++ b/crates/bitcoin-da/src/error.rs @@ -108,6 +108,18 @@ pub enum BitcoinServiceError { /// Body builders error. #[error("Body builders error: {0}")] TransactionBuilderError(String), + /// Fee cap exceeded + #[error("Fee cap exceeded: current rate {current_rate} sat/vb > max {max_rate} sat/vb (elapsed: {elapsed_secs}s / max: {max_duration_secs}s)")] + FeeCapExceeded { + /// Current fee rate as sat/vb + current_rate: u64, + /// Max fee rate in sat/vb + max_rate: u64, + /// Duration since the transaction has been blocked by max fee rate cap + elapsed_secs: u64, + /// Max duration before sending transaction above max fee rate + max_duration_secs: u64, + }, /// Fee service operation failure. 
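The `FeeCapExceeded` variant above encodes the whole policy in its fields: pay at most `max_rate` for up to `max_duration_secs` after the job was created, then send at market rate anyway. A minimal sketch of the gate, with illustrative names:

```rust
// Cap policy implied by `FeeCapExceeded`; the function is a sketch,
// not the bitcoin-da API.
fn should_defer_send(
    current_rate: u64,      // sat/vb quoted by the fee service
    max_rate: u64,          // configured cap in sat/vb
    elapsed_secs: u64,      // seconds since the job's UUIDv7 was minted
    max_duration_secs: u64, // how long to wait out a fee spike
) -> bool {
    // Defer only while both hold; once the budget is spent, send anyway.
    current_rate > max_rate && elapsed_secs < max_duration_secs
}
```

As the service.rs hunk below shows, the error is treated as retriable: progress is re-persisted and the loop `continue`s, so a deferred job is simply attempted again on the next `process_job_service` pass.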
#[error("Fee service error: {0}")] FeeServiceError(#[from] FeeServiceError), diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 006fbd99fe..2158d21651 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -47,7 +47,7 @@ use crate::helpers::builders::body_builders::{create_inscription_transactions, D use crate::helpers::builders::TxWithId; use crate::helpers::merkle_tree::BitcoinMerkleTree; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed}; -use crate::helpers::{merkle_tree, TransactionKind}; +use crate::helpers::{get_timestamp, merkle_tree, TransactionKind}; use crate::job::error::JobServiceError; use crate::job::service::{DaJobService, JobProgress, SentChunks}; use crate::metrics::BITCOIN_DA_METRICS as BM; @@ -360,6 +360,14 @@ impl BitcoinService { previous_job_in_progress = true; } } + Err(e @ BitcoinServiceError::FeeCapExceeded { .. }) => { + warn!("Job {job_id} hit fee cap: {e:?}"); + + // Save updated progress with last sent attempt value and continue + // Fee cap errors should be retried on next `process_job_service` call + job_service.update_job_status(progress, progress.status.clone())?; + continue; + } Err(e) => { error!("Error processing job {}: {:?}", job_id, e); job_service.update_job_status( @@ -387,6 +395,50 @@ impl BitcoinService { progress.job_id, progress.status ); + // Get current fee rate as sat/vb + let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; + let current_time = get_timestamp(); + + let job_created_at = progress + .job_id + .get_timestamp() + .map(|ts| ts.to_unix().0) + .unwrap_or(0); + + let elapsed_secs = current_time.saturating_sub(job_created_at); + + // Cap fee at self.max_fee_rate_sat_to_pay for a maximum of `self.fee_rate_cap_duration_secs`. + // If `self.fee_rate_cap_duration_secs` is exceeded, send transaction with fee rate above `self.max_fee_rate_sat_to_pay` anyway + if fee_sat_per_vbyte > self.max_fee_rate_sat_to_pay { + if elapsed_secs < self.fee_rate_cap_duration_secs { + warn!( + "Job {} fee rate {} sat/vb exceeds cap of {} sat/vb. \ + Waiting (elapsed: {}s / max: {}s)", + progress.job_id, + fee_sat_per_vbyte, + self.max_fee_rate_sat_to_pay, + elapsed_secs, + self.fee_rate_cap_duration_secs + ); + + return Err(BitcoinServiceError::FeeCapExceeded { + current_rate: fee_sat_per_vbyte, + max_rate: self.max_fee_rate_sat_to_pay, + elapsed_secs, + max_duration_secs: self.fee_rate_cap_duration_secs, + }); + } + + warn!( + "Job {} fee rate {} sat/vb exceeds cap of {} sat/vb, \ + but cap duration of {}s exceeded. 
Sending anyway", + progress.job_id, + fee_sat_per_vbyte, + self.max_fee_rate_sat_to_pay, + self.fee_rate_cap_duration_secs + ); + } + // get all available utxos let utxos = self.get_utxos(sent_txids).await?; @@ -398,9 +450,6 @@ impl BitcoinService { } }; - // Get current fee rate as sat/vb - let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; - let da_txs = self .create_da_transactions_with_fee_rate( fee_sat_per_vbyte, From 8c4d6c301d63f0a62b78eede0dc34cb69bf57ab6 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 15 Oct 2025 15:30:29 +0100 Subject: [PATCH 36/81] Da job metrics --- crates/bitcoin-da/src/job/metrics.rs | 122 +++++++++++++++++++++++++++ crates/bitcoin-da/src/job/mod.rs | 2 + crates/bitcoin-da/src/job/service.rs | 9 +- 3 files changed, 131 insertions(+), 2 deletions(-) create mode 100644 crates/bitcoin-da/src/job/metrics.rs diff --git a/crates/bitcoin-da/src/job/metrics.rs b/crates/bitcoin-da/src/job/metrics.rs new file mode 100644 index 0000000000..74a1756c8e --- /dev/null +++ b/crates/bitcoin-da/src/job/metrics.rs @@ -0,0 +1,122 @@ +use std::sync::LazyLock; + +use metrics::{Counter, Gauge, Histogram}; +use metrics_derive::Metrics; +use sov_db::schema::types::da_jobs::JobStatus; + +use crate::helpers::get_timestamp; +use crate::job::service::JobProgress; + +/// Defines the metrics being collected for the DA job service +#[derive(Metrics)] +#[metrics(scope = "da_job")] +pub struct DaJobMetrics { + /// Number of pending jobs + #[metric(describe = "Number of jobs in pending status")] + pub jobs_pending: Gauge, + + /// Number of in-progress jobs + #[metric(describe = "Number of jobs in progress status")] + pub jobs_in_progress: Gauge, + + /// Number of completed jobs + #[metric(describe = "Number of jobs in completed status")] + pub jobs_completed: Gauge, + + /// Number of cancelled jobs + #[metric(describe = "Number of jobs in cancelled status")] + pub jobs_cancelled: Gauge, + + /// Number of failed jobs + #[metric(describe = "Number of jobs in failed status")] + pub jobs_failed: Gauge, + + /// Total jobs submitted + #[metric(describe = "Total number of jobs submitted")] + pub jobs_submitted_total: Counter, + + /// Total jobs completed successfully + #[metric(describe = "Total number of jobs completed successfully")] + pub jobs_completed_total: Counter, + + /// Total jobs cancelled + #[metric(describe = "Total number of jobs cancelled")] + pub jobs_cancelled_total: Counter, + + /// Total jobs failed + #[metric(describe = "Total number of jobs failed")] + pub jobs_failed_total: Counter, + + /// Time taken to process a job from pending to completion + #[metric(describe = "Duration from job submission to completion in seconds")] + pub job_processing_duration: Histogram, + + /// Number of chunks sent per job + #[metric(describe = "Number of commit/reveal pairs sent per job")] + pub job_chunks_sent: Histogram, + + /// Size of job data in bytes + #[metric(describe = "Size of job data in bytes")] + pub job_data_size: Histogram, +} + +impl DaJobMetrics { + pub fn record_status_update(&self, old_status: &JobStatus, progress: &JobProgress) { + let new_status = &progress.status; + if old_status == new_status { + return; + } + + match old_status { + JobStatus::Pending => self.jobs_pending.decrement(1.0), + JobStatus::InProgress => self.jobs_in_progress.decrement(1.0), + JobStatus::Completed => self.jobs_completed.decrement(1.0), + JobStatus::Cancelled => self.jobs_cancelled.decrement(1.0), + JobStatus::Failed { .. 
} => self.jobs_failed.decrement(1.0), + } + + match new_status { + JobStatus::Pending => { + self.jobs_pending.increment(1.0); + } + JobStatus::InProgress => { + self.jobs_in_progress.increment(1.0); + } + JobStatus::Completed => { + self.jobs_completed.increment(1.0); + self.jobs_completed_total.increment(1); + + // Total time between job creation and completion + if let Some(created_at) = progress.job_id.get_timestamp() { + let duration = get_timestamp().saturating_sub(created_at.to_unix().0); + self.job_processing_duration.record(duration as f64); + } + + // Record total chunks sent + self.job_chunks_sent + .record(progress.sent_chunks.count() as f64); + } + JobStatus::Cancelled => { + self.jobs_cancelled.increment(1.0); + self.jobs_cancelled_total.increment(1); + } + JobStatus::Failed { .. } => { + self.jobs_failed.increment(1.0); + self.jobs_failed_total.increment(1); + } + } + } + + /// Record a job submission + pub fn record_job_submitted(&self, data_size: usize) { + self.jobs_submitted_total.increment(1); + self.jobs_pending.increment(1.0); + self.job_data_size.record(data_size as f64); + } +} + +/// DA job service metrics +pub static DA_JOB_METRICS: LazyLock = LazyLock::new(|| { + DaJobMetrics::describe(); + DaJobMetrics::default() +}); diff --git a/crates/bitcoin-da/src/job/mod.rs b/crates/bitcoin-da/src/job/mod.rs index 13ad1c8f53..51d4e187ae 100644 --- a/crates/bitcoin-da/src/job/mod.rs +++ b/crates/bitcoin-da/src/job/mod.rs @@ -13,4 +13,6 @@ pub mod rpc; /// Core job queue implementation and state management pub mod service; +mod metrics; + type Result = std::result::Result; diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index b13aa2f537..08e33dd138 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -12,6 +12,7 @@ use super::Result; use crate::helpers::builders::body_builders::RawTxData; use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; +use crate::job::metrics::DA_JOB_METRICS as JM; use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; /// Tracks progress of a job including sent transactions for recovery. 
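The status gauges above follow a strict move-one-unit discipline: `record_status_update` decrements the bucket for the old status, increments the bucket for the new one, and bails out early when the status did not change. A minimal, self-contained sketch of that bookkeeping — plain `HashMap` counters and a simplified `Status` enum standing in for the `metrics` gauges and `JobStatus` (both stand-ins are illustrative assumptions, not the crate's actual types) — shows why each job stays counted in exactly one bucket:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    enum Status {
        Pending,
        InProgress,
        Completed,
    }

    // Mirrors the shape of `DaJobMetrics::record_status_update`: move one
    // unit of count from the old status bucket to the new one, skipping
    // no-op updates where old and new status are equal.
    fn record_status_update(gauges: &mut HashMap<Status, i64>, old: Status, new: Status) {
        if old == new {
            return; // no transition, nothing to move
        }
        *gauges.entry(old).or_insert(0) -= 1;
        *gauges.entry(new).or_insert(0) += 1;
    }

    fn main() {
        let mut gauges = HashMap::new();
        // `record_job_submitted` counts a new job in the Pending bucket.
        *gauges.entry(Status::Pending).or_insert(0) += 1;

        record_status_update(&mut gauges, Status::Pending, Status::InProgress);
        record_status_update(&mut gauges, Status::InProgress, Status::InProgress); // no-op
        record_status_update(&mut gauges, Status::InProgress, Status::Completed);

        // The job ends up counted in exactly one bucket.
        assert_eq!(gauges[&Status::Pending], 0);
        assert_eq!(gauges[&Status::InProgress], 0);
        assert_eq!(gauges[&Status::Completed], 1);
    }

Under this discipline the sum of the status gauges stays equal to the number of jobs ever submitted: `record_job_submitted` adds exactly one unit (to pending), and every later transition only moves that unit between buckets.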
@@ -185,6 +186,8 @@ impl DaJobService { self.ledger_db.submit_job(&job, &progress.into())?; + JM.record_job_submitted(job.data.len()); + info!("Job {job_id} submitted and persisted"); Ok(job_id) } @@ -234,14 +237,16 @@ impl DaJobService { progress: &mut JobProgress, new_status: JobStatus, ) -> Result<()> { - let previous_status = progress.status.as_u8(); + let previous_status = progress.status.clone(); progress.status = new_status; progress.last_updated = get_timestamp(); let db_progress = progress.clone().into(); self.ledger_db - .upsert_progress(&db_progress, previous_status)?; + .upsert_progress(&db_progress, previous_status.as_u8())?; + + JM.record_status_update(&previous_status, progress); Ok(()) } From e5f55232452d4652f9e9cb760045e9188151c642 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Thu, 16 Oct 2025 09:41:53 +0100 Subject: [PATCH 37/81] Rename to da_job --- bin/citrea/tests/bitcoin/{bitcoin_job.rs => da_job.rs} | 0 bin/citrea/tests/bitcoin/mod.rs | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename bin/citrea/tests/bitcoin/{bitcoin_job.rs => da_job.rs} (100%) diff --git a/bin/citrea/tests/bitcoin/bitcoin_job.rs b/bin/citrea/tests/bitcoin/da_job.rs similarity index 100% rename from bin/citrea/tests/bitcoin/bitcoin_job.rs rename to bin/citrea/tests/bitcoin/da_job.rs diff --git a/bin/citrea/tests/bitcoin/mod.rs b/bin/citrea/tests/bitcoin/mod.rs index 33a776a668..587c8a7cc0 100644 --- a/bin/citrea/tests/bitcoin/mod.rs +++ b/bin/citrea/tests/bitcoin/mod.rs @@ -11,10 +11,10 @@ pub mod rollback; mod utils; // pub mod mempool_accept; pub mod backup; -pub mod bitcoin_job; pub mod bitcoin_service; pub mod bitcoin_test; pub mod bitcoin_verifier; +pub mod da_job; #[cfg(feature = "testing")] pub mod da_queue; pub mod fork; From cb8ba05348125fd4654b50e329887677a49a380d Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Thu, 16 Oct 2025 11:50:35 +0100 Subject: [PATCH 38/81] Da job id by prover job id index --- .../full-node/db/sov-db/src/ledger_db/mod.rs | 30 ++++++++++++++----- .../db/sov-db/src/ledger_db/traits.rs | 6 ++++ .../full-node/db/sov-db/src/schema/tables.rs | 6 ++++ 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 72dc2ae32d..d772d8923a 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -17,13 +17,13 @@ use crate::rocks_db_config::RocksdbConfig; use crate::schema::tables::TestTableNew; use crate::schema::tables::{ CommitmentIndicesByJobId, CommitmentIndicesByL1, CommitmentMerkleRoots, CommitmentsByNumber, - DaJobById, DaJobProgressById, DaJobStatusIndex, ExecutedMigrations, JobIdOfCommitment, - L2BlockByHash, L2BlockByNumber, L2GenesisStateRoot, L2RangeByL1Height, L2StatusHeights, - LastPrunedBlock, LightClientProofBySlotNumber, MempoolTxs, PendingBonsaiSessionByJobId, - PendingL1SubmissionJobs, PendingProofs, PendingSequencerCommitments, ProofByJobId, - ProverLastScannedSlot, ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, - ShortHeaderProofBySlotHash, SlotByHash, StateDiffByBlockNumber, - VerifiedBatchProofsBySlotNumber, LEDGER_TABLES, + DaJobById, DaJobIdByProvingJobId, DaJobProgressById, DaJobStatusIndex, ExecutedMigrations, + JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, L2GenesisStateRoot, L2RangeByL1Height, + 
L2StatusHeights, LastPrunedBlock, LightClientProofBySlotNumber, MempoolTxs, + PendingBonsaiSessionByJobId, PendingL1SubmissionJobs, PendingProofs, + PendingSequencerCommitments, ProofByJobId, ProverLastScannedSlot, ProverPendingCommitments, + ProverStateDiffs, SequencerCommitmentByIndex, ShortHeaderProofBySlotHash, SlotByHash, + StateDiffByBlockNumber, VerifiedBatchProofsBySlotNumber, LEDGER_TABLES, }; use crate::schema::types::batch_proof::{ StoredBatchProof, StoredBatchProofOutput, StoredVerifiedProof, @@ -691,6 +691,22 @@ impl BatchProverLedgerOps for LedgerDB { JobStatus::Proving } } + + fn set_proving_job_da_job_id( + &self, + proving_job_id: Uuid, + da_job_id: Uuid, + ) -> anyhow::Result<()> { + let mut schema_batch = SchemaBatch::new(); + + schema_batch.put::(&proving_job_id, &da_job_id)?; + + self.db.write_schemas(schema_batch) + } + + fn get_proving_job_da_job_id(&self, proving_job_id: Uuid) -> anyhow::Result> { + self.db.get::(&proving_job_id) + } } impl BonsaiLedgerOps for LedgerDB { diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index 79e818c956..ffe09da55a 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -271,6 +271,12 @@ pub trait BatchProverLedgerOps: SharedLedgerOps + Send + Sync { /// Get job status (non-existent job IS RUNNING) fn job_status(&self, id: Uuid) -> JobStatus; + + /// Set a da job_id by prover job_id + fn set_proving_job_da_job_id(&self, proving_job_id: Uuid, da_job_id: Uuid) -> Result<()>; + + /// Get da job_id by prover job_id + fn get_proving_job_da_job_id(&self, proving_job_id: Uuid) -> Result>; } /// Light client prover ledger operations diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index e510fc0950..c3269ae9b1 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -98,6 +98,7 @@ pub const BATCH_PROVER_LEDGER_TABLES: &[&str] = &[ CommitmentIndicesByJobId::table_name(), CommitmentIndicesByL1::table_name(), DaJobById::table_name(), + DaJobIdByProvingJobId::table_name(), DaJobProgressById::table_name(), DaJobStatusIndex::table_name(), ExecutedMigrations::table_name(), @@ -518,6 +519,11 @@ define_table_with_seek_key_codec!( (DaJobStatusIndex) (u8, Uuid) => () ); +define_table_with_seek_key_codec!( + /// DA job id by proving job id + (DaJobIdByProvingJobId) Uuid => Uuid +); + #[cfg(test)] define_table_with_seek_key_codec!( /// Test table old From 76abe0cb5c4f9f53e4cee147675e503a3dd3fb60 Mon Sep 17 00:00:00 2001 From: Rakan Al-Huneiti Date: Thu, 16 Oct 2025 22:36:20 +0300 Subject: [PATCH 39/81] feat: DA job rollback (#2984) --- .../src/rollback/node/batch_prover.rs | 59 +++++++++++++++++-- .../src/rollback/node/sequencer.rs | 38 +++++++++++- 2 files changed, 91 insertions(+), 6 deletions(-) diff --git a/crates/storage-ops/src/rollback/node/batch_prover.rs b/crates/storage-ops/src/rollback/node/batch_prover.rs index a9668836db..619683f5ed 100644 --- a/crates/storage-ops/src/rollback/node/batch_prover.rs +++ b/crates/storage-ops/src/rollback/node/batch_prover.rs @@ -2,10 +2,10 @@ use std::collections::HashMap; use std::sync::Arc; use sov_db::schema::tables::{ - CommitmentIndicesByJobId, CommitmentIndicesByL1, JobIdOfCommitment, L2BlockByHash, - L2BlockByNumber, PendingL1SubmissionJobs, 
ProofByJobId, ProverLastScannedSlot, - ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, - ShortHeaderProofBySlotHash, SlotByHash, + CommitmentIndicesByJobId, CommitmentIndicesByL1, DaJobById, DaJobIdByProvingJobId, + DaJobProgressById, DaJobStatusIndex, JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, + PendingL1SubmissionJobs, ProofByJobId, ProverLastScannedSlot, ProverPendingCommitments, + ProverStateDiffs, SequencerCommitmentByIndex, ShortHeaderProofBySlotHash, SlotByHash, }; use sov_db::schema::types::{L2BlockNumber, SlotNumber}; use sov_schema_db::{ScanDirection, SchemaBatch, DB}; @@ -190,6 +190,54 @@ impl BatchProverLedgerRollback { Ok(cache) } + + fn rollback_da_jobs(&self, mut rollback_result: RollbackResult) -> Result { + let mut batch = SchemaBatch::new(); + + // Iterate through all jobs and delete them during rollback + let mut jobs_iter = self.ledger_db.iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + jobs_iter.seek_to_last(); + + for record in jobs_iter { + let record = record?; + let job_id = record.key; + let progress = record.value; + let status_u8 = progress.status.as_u8(); + + // Delete from all DA job tables + batch.delete::(&job_id)?; + increment_table_counter!("DaJobById", rollback_result); + + batch.delete::(&job_id)?; + increment_table_counter!("DaJobProgressById", rollback_result); + + batch.delete::(&(status_u8, job_id))?; + increment_table_counter!("DaJobStatusIndex", rollback_result); + } + + // Delete all entries from DaJobIdByProvingJobId (secondary index table) + let mut proving_job_iter = self + .ledger_db + .iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + proving_job_iter.seek_to_last(); + + for record in proving_job_iter { + let record = record?; + let proving_job_id = record.key; + + batch.delete::(&proving_job_id)?; + increment_table_counter!("DaJobIdByProvingJobId", rollback_result); + } + + self.ledger_db.write_schemas(batch)?; + Ok(rollback_result) + } } impl LedgerNodeRollback for BatchProverLedgerRollback { @@ -213,6 +261,9 @@ impl LedgerNodeRollback for BatchProverLedgerRollback { .put::(&(), &SlotNumber(l1_target)); } + // Rollback DA jobs + rollback_result = self.rollback_da_jobs(rollback_result)?; + let _ = self.ledger_db.flush(); Ok(rollback_result) } diff --git a/crates/storage-ops/src/rollback/node/sequencer.rs b/crates/storage-ops/src/rollback/node/sequencer.rs index 3b778c8e6b..d7ae777bf4 100644 --- a/crates/storage-ops/src/rollback/node/sequencer.rs +++ b/crates/storage-ops/src/rollback/node/sequencer.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use sov_db::schema::tables::{ - CommitmentsByNumber, L2BlockByHash, L2BlockByNumber, L2RangeByL1Height, - SequencerCommitmentByIndex, StateDiffByBlockNumber, + CommitmentsByNumber, DaJobById, DaJobProgressById, DaJobStatusIndex, L2BlockByHash, + L2BlockByNumber, L2RangeByL1Height, SequencerCommitmentByIndex, StateDiffByBlockNumber, }; use sov_db::schema::types::{L2BlockNumber, SlotNumber}; use sov_schema_db::{ScanDirection, SchemaBatch, DB}; @@ -112,6 +112,37 @@ impl SequencerLedgerRollback { self.ledger_db.write_schemas(batch)?; Ok(rollback_result) } + + fn rollback_da_jobs(&self, mut rollback_result: RollbackResult) -> Result { + let mut batch = SchemaBatch::new(); + + // Iterate through all jobs and delete them during rollback + let mut jobs_iter = self.ledger_db.iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + jobs_iter.seek_to_last(); + + for record in jobs_iter { + let record = 
record?; + let job_id = record.key; + let progress = record.value; + let status_u8 = progress.status.as_u8(); + + // Delete from all three tables + batch.delete::(&job_id)?; + increment_table_counter!("DaJobById", rollback_result); + + batch.delete::(&job_id)?; + increment_table_counter!("DaJobProgressById", rollback_result); + + batch.delete::(&(status_u8, job_id))?; + increment_table_counter!("DaJobStatusIndex", rollback_result); + } + + self.ledger_db.write_schemas(batch)?; + Ok(rollback_result) + } } impl LedgerNodeRollback for SequencerLedgerRollback { @@ -131,6 +162,9 @@ impl LedgerNodeRollback for SequencerLedgerRollback { rollback_result = self.rollback_slots(l1_target, rollback_result)?; } + // Rollback DA jobs + rollback_result = self.rollback_da_jobs(rollback_result)?; + let _ = self.ledger_db.flush(); Ok(rollback_result) From 3a6323d471774717fa92f7d53e4eef766fb8863b Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Thu, 16 Oct 2025 20:43:47 +0100 Subject: [PATCH 40/81] Handle finalize_proving_job --- bin/citrea/tests/bitcoin/batch_prover_test.rs | 2 +- bin/citrea/tests/bitcoin/da_job.rs | 165 +++++++++--------- bin/citrea/tests/bitcoin/da_queue.rs | 25 ++- bin/citrea/tests/bitcoin/full_node.rs | 73 ++++---- bin/citrea/tests/bitcoin/light_client_test.rs | 102 +++++------ .../tests/bitcoin/sequencer_commitments.rs | 5 +- bin/citrea/tests/bitcoin/utils.rs | 22 +-- crates/batch-prover/src/prover.rs | 90 +++++++--- crates/batch-prover/src/rpc.rs | 8 +- .../src/helpers/builders/body_builders.rs | 2 +- crates/bitcoin-da/src/job/service.rs | 62 ++++++- crates/bitcoin-da/src/service.rs | 85 ++++----- crates/bitcoin-da/src/test_utils.rs | 10 ++ crates/prover-services/src/parallel.rs | 24 ++- crates/sequencer/src/commitment/service.rs | 9 +- .../adapters/mock-da/src/service.rs | 31 ++-- .../rollup-interface/src/node/services/da.rs | 27 +-- 17 files changed, 444 insertions(+), 298 deletions(-) diff --git a/bin/citrea/tests/bitcoin/batch_prover_test.rs b/bin/citrea/tests/bitcoin/batch_prover_test.rs index f72137fb2b..7ffb745aba 100644 --- a/bin/citrea/tests/bitcoin/batch_prover_test.rs +++ b/bin/citrea/tests/bitcoin/batch_prover_test.rs @@ -311,7 +311,7 @@ async fn basic_prover_test() -> Result<()> { // // Send the same commitment that was already proven. 
// bitcoin_da_service -// .send_transaction( +// .send_transaction_and_wait( // DaTxRequest::SequencerCommitment(commitments.first().unwrap().clone()), // 1, // ) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index 4c92d1940f..d78407ccc5 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -4,7 +4,7 @@ use std::time::Duration; use alloy_primitives::{U32, U64}; use async_trait::async_trait; use bitcoin::hashes::Hash; -use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter, RetryJobResponse}; +use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter}; use bitcoin_da::job::service::JobStatus; use bitcoin_da::service::BitcoinService; use bitcoincore_rpc::RpcApi; @@ -19,7 +19,6 @@ use reth_tasks::TaskManager; use sov_ledger_rpc::LedgerRpcClient; use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; use sov_rollup_interface::services::da::DaService; -use tokio::time::sleep; use super::get_citrea_path; use crate::bitcoin::full_node::create_serialized_fake_receipt_batch_proof_with_state_roots; @@ -65,7 +64,7 @@ impl JobServiceTest { assert!(all_jobs.is_empty()); let job_id = da_service - .send_transaction(DaTxRequest::ZKProof(proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof)) .await?; da.wait_mempool_len(2, None).await?; @@ -127,7 +126,7 @@ impl JobServiceTest { None, ); - let job_id = da_service + let (job_id, rx) = da_service .send_transaction(DaTxRequest::ZKProof(proof.clone())) .await?; @@ -152,43 +151,43 @@ impl JobServiceTest { da.generate(1).await?; // Make sure job doesn't get processed after freeing space in mempool - let res = da_service - .wait_for_completion(job_id, Some(Duration::from_secs(5))) - .await; + let res = rx.await.unwrap(); assert!(res.is_err()); - let retry_job_response: RetryJobResponse = da_service_client.da_job_retry(job_id).await?; + // // TODO find a way to deterministically wait for retry - let old_job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - assert_eq!(old_job_by_id.status, JobStatus::Cancelled); + // let retry_job_response: RetryJobResponse = da_service_client.da_job_retry(job_id).await?; - let new_job_by_id: JobInfoResponse = da_service_client - .da_job_get_info(retry_job_response.new_job_id) - .await?; - assert_eq!(new_job_by_id.status, JobStatus::Pending); - da.generate(1).await?; + // let old_job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + // assert_eq!(old_job_by_id.status, JobStatus::Cancelled); - // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit - // The three first proofs should hit the mempool + 1 chunk - da.wait_mempool_len(18, None).await?; + // let new_job_by_id: JobInfoResponse = da_service_client + // .da_job_get_info(retry_job_response.new_job_id) + // .await?; + // assert_eq!(new_job_by_id.status, JobStatus::Pending); + // da.generate(1).await?; - assert_eq!(da.get_raw_mempool().await?.len(), 18); + // // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit + // // The three first proofs should hit the mempool + 1 chunk + // da.wait_mempool_len(18, None).await?; - let new_job_by_id: JobInfoResponse = da_service_client - .da_job_get_info(retry_job_response.new_job_id) - .await?; - assert_eq!(new_job_by_id.status, JobStatus::InProgress); - da.generate(1).await?; + // assert_eq!(da.get_raw_mempool().await?.len(), 18); - let res = da_service - 
.wait_for_completion(retry_job_response.new_job_id, None) - .await; - assert!(res.is_ok()); + // let new_job_by_id: JobInfoResponse = da_service_client + // .da_job_get_info(retry_job_response.new_job_id) + // .await?; + // assert_eq!(new_job_by_id.status, JobStatus::InProgress); + // da.generate(1).await?; - let new_job_by_id: JobInfoResponse = da_service_client - .da_job_get_info(retry_job_response.new_job_id) - .await?; - assert_eq!(new_job_by_id.status, JobStatus::Completed); + // let res = da_service + // .wait_for_completion(retry_job_response.new_job_id, None) + // .await; + // assert!(res.is_ok()); + + // let new_job_by_id: JobInfoResponse = da_service_client + // .da_job_get_info(retry_job_response.new_job_id) + // .await?; + // assert_eq!(new_job_by_id.status, JobStatus::Completed); Ok(()) } @@ -222,7 +221,7 @@ impl JobServiceTest { ); // Create multiple jobs to check list handling - let job_id_1 = da_service + let (_, rx) = da_service .send_transaction(DaTxRequest::ZKProof(proof.clone())) .await?; @@ -279,14 +278,14 @@ impl JobServiceTest { // Mine all sent txs da.generate(1).await?; - let res = da_service.wait_for_completion(job_id_1, None).await; + let res = rx.await.unwrap(); assert!(res.is_ok()); // Verify completed jobs let completed_jobs = da_service_client .da_job_list(Some(JobStatusFilter::Completed), None, None) .await?; - assert_eq!(completed_jobs.len(), 3); + assert_eq!(completed_jobs.len(), 2); Ok(()) } @@ -317,7 +316,7 @@ impl JobServiceTest { None, ); - let job_id = da_service + let (job_id, _) = da_service .send_transaction(DaTxRequest::ZKProof(proof)) .await?; @@ -335,52 +334,54 @@ impl JobServiceTest { assert_eq!(active_jobs_before.len(), 1); assert_eq!(active_jobs_before[0].job_id, job_id); - // Send graceful shutdown to da_service and drop da_service - drop(da_service); - drop(da_service_client); - self.task_manager.take().unwrap().graceful_shutdown(); - sleep(Duration::from_secs(5)).await; - - // Create a new task_manager as previous was consumed - self.task_manager = Some(TaskManager::current()); - let task_executor = self.task_manager.as_ref().unwrap().executor(); - - let (da_service, da_service_client) = spawn_bitcoin_da_prover_service_with_rpc_server( - &task_executor, - &da.config, - Self::test_config().dir, - ) - .await; - - let job_after: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - - assert_eq!(job_after.job_id, job_before.job_id); - assert_eq!(job_after.status, job_before.status); - assert_eq!(job_after.created_at, job_before.created_at); - assert_eq!(job_after.sent_count, job_before.sent_count); - - let active_jobs_after = da_service_client - .da_job_list(Some(JobStatusFilter::Active), None, None) - .await?; - assert_eq!(active_jobs_after.len(), 1); - assert_eq!(active_jobs_after[0].job_id, job_id); - assert_eq!(active_jobs_after[0].status, JobStatus::InProgress); - - da.generate(1).await?; - - da.wait_mempool_len(6, None).await?; - let res = da_service.wait_for_completion(job_id, None).await; - assert!(res.is_ok()); - - let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - assert_eq!(completed_job.status, JobStatus::Completed); - assert_eq!(completed_job.created_at, job_before.created_at); - assert_eq!(completed_job.error, None); - - let active_jobs_final = da_service_client - .da_job_list(Some(JobStatusFilter::Active), None, None) - .await?; - assert_eq!(active_jobs_final.len(), 0); + // TODO handle proper recovery + + // // Send graceful shutdown to da_service and drop da_service + // 
drop(da_service); + // drop(da_service_client); + // self.task_manager.take().unwrap().graceful_shutdown(); + // sleep(Duration::from_secs(5)).await; + + // // Create a new task_manager as previous was consumed + // self.task_manager = Some(TaskManager::current()); + // let task_executor = self.task_manager.as_ref().unwrap().executor(); + + // let (da_service, da_service_client) = spawn_bitcoin_da_prover_service_with_rpc_server( + // &task_executor, + // &da.config, + // Self::test_config().dir, + // ) + // .await; + + // let job_after: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + + // assert_eq!(job_after.job_id, job_before.job_id); + // assert_eq!(job_after.status, job_before.status); + // assert_eq!(job_after.created_at, job_before.created_at); + // assert_eq!(job_after.sent_count, job_before.sent_count); + + // let active_jobs_after = da_service_client + // .da_job_list(Some(JobStatusFilter::Active), None, None) + // .await?; + // assert_eq!(active_jobs_after.len(), 1); + // assert_eq!(active_jobs_after[0].job_id, job_id); + // assert_eq!(active_jobs_after[0].status, JobStatus::InProgress); + + // da.generate(1).await?; + + // da.wait_mempool_len(6, None).await?; + // let res = da_service.wait_for_completion(job_id, None).await; + // assert!(res.is_ok()); + + // let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + // assert_eq!(completed_job.status, JobStatus::Completed); + // assert_eq!(completed_job.created_at, job_before.created_at); + // assert_eq!(completed_job.error, None); + + // let active_jobs_final = da_service_client + // .da_job_list(Some(JobStatusFilter::Active), None, None) + // .await?; + // assert_eq!(active_jobs_final.len(), 0); Ok(()) } diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 26c4311f72..9694d53a76 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -63,7 +63,9 @@ impl DaTransactionQueueingTest { // Fill mempool for i in 1..=3 { da_service - .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) + .send_transaction_and_wait(DaTxRequest::ZKProof( + verifiable_100kb_batch_proof.clone(), + )) .await?; da.wait_mempool_len(8 * i, None).await?; } @@ -85,7 +87,7 @@ impl DaTransactionQueueingTest { // Try to send when queue is already filled up. 
        // This is to test that UTXOs are correctly selected and that it doesn't hang waiting for the list of queued txids to be returned
        let res = da_service
-            .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()))
+            .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()))
            .await;

        assert!(matches!(
@@ -110,7 +112,7 @@
         tokio::time::sleep(std::time::Duration::from_secs(3)).await;
         // Send additional proof and make sure it doesn't hit PreviousJobInProgress error
         let res = da_service
-            .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()))
+            .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()))
             .await;

         assert!(res.is_ok());
@@ -421,21 +423,29 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest {
         // Fill mempool
         for i in 1..=3 {
+            println!("i : {:?}", i);
             da_service
-                .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()))
+                .send_transaction_and_wait(DaTxRequest::ZKProof(
+                    verifiable_100kb_batch_proof.clone(),
+                ))
                 .await?;
+
             da.wait_mempool_len(8 * i, None).await?;
         }

-        da_service
+        println!("11");
+
+        let (job_id, rx) = da_service
             .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()))
             .await?;
+        println!("22");

         // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit
         // The three first proofs should hit the mempool + 1 chunk
         da.wait_mempool_len(8 * 3 + 2, None).await?;

         assert_eq!(da.get_raw_mempool().await?.len(), 26);
+        println!("33");

         // Assert that all sent txs are monitored
         let monitored_txs = da_service.monitoring.get_monitored_txs().await;
         assert_eq!(monitored_txs.len(), 26);
@@ -446,6 +456,7 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest {
             .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()))
             .await;

+        println!("44");
         assert!(res.is_ok());

         // Txs starting from a new chain should be accepted to mempool
@@ -454,6 +465,7 @@
         let monitored_txs = da_service.monitoring.get_monitored_txs().await;
         assert_eq!(monitored_txs.len(), 34);

+        println!("55");
         // We mine the first three proofs + the 1 chunk pair + the extra proof starting another UTXO chain
         // and make sure that the remaining chunks and aggregate are sent on the next block when mempool size is freed
         // Assert that all chunks were mined and mempool space is freed
@@ -467,6 +479,7 @@

         assert_eq!(relevant_txs.len(), 17);

+        println!("66");
         // Remaining chunks and aggregate
         da.wait_mempool_len(6, None).await?;
         assert_eq!(da.get_raw_mempool().await?.len(), 6);
@@ -711,6 +724,7 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest {
             .header
             .state_root;

+        println!("1");
         self.test_package_mempool_limits(
             da,
             &da_service,
@@ -722,6 +736,7 @@
         )
         .await?;

+        println!("2");
         self.test_package_too_large(
             da,
             &da_service,
diff --git a/bin/citrea/tests/bitcoin/full_node.rs b/bin/citrea/tests/bitcoin/full_node.rs
index 11ed882b04..092edaca56 100644
--- a/bin/citrea/tests/bitcoin/full_node.rs
+++ b/bin/citrea/tests/bitcoin/full_node.rs
@@ -23,7 +23,6 @@
 use sov_ledger_rpc::LedgerRpcClient;
 use sov_modules_api::BatchProofCircuitOutputV3;
 use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment};
 use sov_rollup_interface::rpc::block::L2BlockResponse;
-use sov_rollup_interface::services::da::DaService;
 use 
sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use tokio::time::sleep; @@ -157,7 +156,7 @@ impl TestCase for PreStateRootMismatchTest { // Send the first proof prover_da_service - .send_transaction(DaTxRequest::ZKProof(proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof)) .await .unwrap(); @@ -229,7 +228,7 @@ impl TestCase for PreStateRootMismatchTest { // Send the invalid proof prover_da_service - .send_transaction(DaTxRequest::ZKProof(invalid_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(invalid_proof)) .await .unwrap(); @@ -375,7 +374,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { // Send the `correct_commitment` so it's stored and will trigger the pre-hash mismatch against `wrong_commitment` sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(correct_commitment.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(correct_commitment.clone())) .await .unwrap(); @@ -439,7 +438,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { None, ); prover_da_service - .send_transaction(DaTxRequest::ZKProof(fake_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(fake_proof)) .await .unwrap(); @@ -535,7 +534,7 @@ impl TestCase for PendingCommitmentHaltingErrorTest { }; bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( wrong_merkle_root_commitment.clone(), )) .await @@ -998,7 +997,7 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the zero index commitment first, should be ignored bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( zero_index_commitment.clone(), )) .await @@ -1023,7 +1022,7 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the second commitment first bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment(second_commitment.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(second_commitment.clone())) .await .unwrap(); @@ -1047,7 +1046,7 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the first commitment bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment(first_commitment.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(first_commitment.clone())) .await .unwrap(); @@ -1177,7 +1176,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send commitment A bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment_a.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_a.clone())) .await .unwrap(); @@ -1199,7 +1198,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send conflicting commitment with different merkle root, should be ignored bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( conflicting_commitment_different_root.clone(), )) .await @@ -1224,7 +1223,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send conflicting commitment B bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment_b.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_b.clone())) .await .unwrap(); @@ -1269,7 +1268,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send commitment C that follows A bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment_c.clone())) + 
.send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_c.clone()))
             .await
             .unwrap();

@@ -1564,7 +1563,7 @@ impl TestCase for OutOfRangeProofTest {

         // Send the proof first. It should be discarded as none of its commitments exist
         prover_da_service
-            .send_transaction(DaTxRequest::ZKProof(proof1.clone()))
+            .send_transaction_and_wait(DaTxRequest::ZKProof(proof1.clone()))
             .await
             .unwrap();

@@ -1585,7 +1584,7 @@
         );

         sequencer_da_service
-            .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone()))
+            .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone()))
             .await
             .unwrap();

@@ -1614,7 +1613,7 @@
         assert!(proven_height.is_none(), "Proof should have been discarded");

         sequencer_da_service
-            .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone()))
+            .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone()))
             .await
             .unwrap();

@@ -1687,12 +1686,12 @@
         full_node.start(None, None).await?;

         sequencer_da_service
-            .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone()))
+            .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone()))
             .await
             .unwrap();

         sequencer_da_service
-            .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone()))
+            .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone()))
             .await
             .unwrap();

@@ -1715,7 +1714,7 @@

         // Send the proof first. It should be processed as its commitments exist
         prover_da_service
-            .send_transaction(DaTxRequest::ZKProof(proof1))
+            .send_transaction_and_wait(DaTxRequest::ZKProof(proof1))
             .await
             .unwrap();

@@ -1737,12 +1736,12 @@
         // Send commitments for proof 2 and proof 3
         sequencer_da_service
-            .send_transaction(DaTxRequest::SequencerCommitment(commitment3.clone()))
+            .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment3.clone()))
             .await
             .unwrap();

         sequencer_da_service
-            .send_transaction(DaTxRequest::SequencerCommitment(commitment4.clone()))
+            .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment4.clone()))
             .await
             .unwrap();

@@ -1795,7 +1794,7 @@
         );

         // Send the third proof first. It should be set as pending as its commitments exist but its starting commitment index is not the proven proof's last commitment index + 1
         prover_da_service
-            .send_transaction(DaTxRequest::ZKProof(proof3))
+            .send_transaction_and_wait(DaTxRequest::ZKProof(proof3))
             .await
             .unwrap();

@@ -1854,7 +1853,7 @@

         // Now send the second proof. 
It should be processed and trigger a processing of pending proof3 prover_da_service - .send_transaction(DaTxRequest::ZKProof(proof2)) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof2)) .await .unwrap(); @@ -2090,7 +2089,7 @@ impl TestCase for OverlappingProofRangesTest { full_node.start(None, None).await?; sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); @@ -2121,12 +2120,12 @@ impl TestCase for OverlappingProofRangesTest { .state_root; sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment3.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); @@ -2229,22 +2228,22 @@ impl TestCase for OverlappingProofRangesTest { // Send all 4 commitments in order sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment1.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment2.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment3.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment4.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment4.clone())) .await .unwrap(); @@ -2290,7 +2289,7 @@ impl TestCase for OverlappingProofRangesTest { // Send proof_a over commitments [1,2,3] prover_da_service - .send_transaction(DaTxRequest::ZKProof(proof_a.clone())) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof_a.clone())) .await .unwrap(); @@ -2377,7 +2376,7 @@ impl TestCase for OverlappingProofRangesTest { // Send proof_b with overlapping range of [2,3,4] prover_da_service - .send_transaction(DaTxRequest::ZKProof(proof_b.clone())) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof_b.clone())) .await .unwrap(); @@ -2582,7 +2581,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { }; sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment_1.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_1.clone())) .await .unwrap(); @@ -2625,7 +2624,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { /*------- */ sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment_2.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_2.clone())) .await .unwrap(); @@ -2667,7 +2666,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { /*------- */ sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment_3.clone())) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_3.clone())) .await .unwrap(); @@ -3670,7 +3669,7 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { .await; sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment(correct_commitment)) + 
.send_transaction_and_wait(DaTxRequest::SequencerCommitment(correct_commitment)) .await .unwrap(); @@ -3695,7 +3694,7 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { }; sequencer_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( wrong_merkle_root_commitment, )) .await diff --git a/bin/citrea/tests/bitcoin/light_client_test.rs b/bin/citrea/tests/bitcoin/light_client_test.rs index 9488ebc38e..8a22f4aa70 100644 --- a/bin/citrea/tests/bitcoin/light_client_test.rs +++ b/bin/citrea/tests/bitcoin/light_client_test.rs @@ -680,7 +680,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateTest { let signatures_with_index = create_valid_signatures(&signers, &prehash); bitcoin_da_service - .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body, signatures_with_index, })) @@ -905,7 +905,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { let signatures_with_index = create_valid_signatures(&signers, &prehash); bitcoin_da_service - .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body.clone(), signatures_with_index, })) @@ -943,7 +943,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index[0].0[0] ^= 0xFF; bitcoin_da_service - .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body2.clone(), signatures_with_index, })) @@ -981,7 +981,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { // Corrupt one signature signatures_with_index[0].1 = signatures_with_index[2].1; bitcoin_da_service - .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body3.clone(), signatures_with_index, })) @@ -1018,7 +1018,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { // Corrupt one signature signatures_with_index[2].1 = 5; // out of bounds bitcoin_da_service - .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body3.clone(), signatures_with_index, })) @@ -1059,7 +1059,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index[2].1 = tmp; bitcoin_da_service - .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body3.clone(), signatures_with_index, })) @@ -1092,7 +1092,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { let prehash4 = eip191_hash_message(msg4.as_slice()); let signatures_with_index = create_valid_signatures(&signers, &prehash4); bitcoin_da_service - .send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body4.clone(), signatures_with_index, })) @@ -1129,7 +1129,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index.swap(0, 2); bitcoin_da_service - 
.send_transaction(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { body: method_id_body5.clone(), signatures_with_index, })) @@ -1247,7 +1247,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -1260,7 +1260,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment_2.clone(), )) .await @@ -1273,7 +1273,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment_3.clone(), )) .await @@ -1286,7 +1286,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment_4.clone(), )) .await @@ -1308,7 +1308,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { None, ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1323,7 +1323,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_2.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1339,7 +1339,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_3.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(unparsable_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(unparsable_batch_proof)) .await .unwrap(); @@ -1354,7 +1354,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1371,7 +1371,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_3.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(unverifiable_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(unverifiable_batch_proof)) .await .unwrap(); @@ -1479,7 +1479,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -1492,7 +1492,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment2.clone(), )) .await @@ -1505,7 +1505,7 @@ impl 
TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment3.clone(), )) .await @@ -1560,7 +1560,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_100kb_batch_proof)) .await .unwrap(); @@ -1621,7 +1621,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(verifiable_130kb_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_130kb_batch_proof)) .await .unwrap(); @@ -1733,7 +1733,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { Some(fake_sequencer_commitment2.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(unverifiable_100kb_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(unverifiable_100kb_batch_proof)) .await .unwrap(); @@ -1848,7 +1848,7 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -1861,7 +1861,7 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment2.clone(), )) .await @@ -1874,7 +1874,7 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment3.clone(), )) .await @@ -1887,7 +1887,7 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment4.clone(), )) .await @@ -1968,17 +1968,17 @@ impl TestCase for UnchainedBatchProofsTest { ); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp1)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp1)) .await .unwrap(); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp2)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp2)) .await .unwrap(); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp3)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp3)) .await .unwrap(); @@ -2006,7 +2006,7 @@ impl TestCase for UnchainedBatchProofsTest { assert_eq!(lcp_output.last_sequencer_commitment_index, U32::from(1)); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp4)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp4)) .await .unwrap(); @@ -2104,7 +2104,7 @@ impl TestCase for UnknownL1HashBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -2149,7 +2149,7 @@ impl TestCase for UnknownL1HashBatchProofTest { ); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2252,7 +2252,7 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = 
sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -2265,7 +2265,7 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment2.clone(), )) .await @@ -2278,7 +2278,7 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment3.clone(), )) .await @@ -2333,7 +2333,7 @@ impl TestCase for ChainProofByCommitmentIndex { ); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2352,7 +2352,7 @@ impl TestCase for ChainProofByCommitmentIndex { ); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2497,7 +2497,7 @@ impl TestCase for ProofWithMissingCommitment { ); bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2617,7 +2617,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { }; let _ = malicious_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -2661,7 +2661,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { ); batch_prover_bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp1)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp1)) .await .unwrap(); @@ -2691,7 +2691,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send with the correct da service let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -2720,7 +2720,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { ); batch_prover_bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp1)) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp1)) .await .unwrap(); @@ -2757,7 +2757,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send with the correct da service let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment2.clone(), )) .await @@ -2786,7 +2786,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { ); malicious_bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp2.clone())) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp2.clone())) .await .unwrap(); @@ -2816,7 +2816,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send batch proof with the correct da pub key and expect it to transition batch_prover_bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(bp2.clone())) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp2.clone())) .await .unwrap(); @@ -2947,7 +2947,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + 
.send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment.clone(), )) .await @@ -2960,7 +2960,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { }; let _ = sequencer_bitcoin_da_service - .send_transaction(DaTxRequest::SequencerCommitment( + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( fake_sequencer_commitment_2.clone(), )) .await @@ -2982,7 +2982,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { None, ); let _ = batch_prover_bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(verifiable_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -3021,7 +3021,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { ), ); let _ = batch_prover_bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(wrong_prev_hash_batch_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(wrong_prev_hash_batch_proof)) .await .unwrap(); @@ -3053,7 +3053,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); let _ = batch_prover_bitcoin_da_service - .send_transaction(DaTxRequest::ZKProof(correct_prev_hash_proof)) + .send_transaction_and_wait(DaTxRequest::ZKProof(correct_prev_hash_proof)) .await .unwrap(); diff --git a/bin/citrea/tests/bitcoin/sequencer_commitments.rs b/bin/citrea/tests/bitcoin/sequencer_commitments.rs index 175e94f96b..2efe1ed337 100644 --- a/bin/citrea/tests/bitcoin/sequencer_commitments.rs +++ b/bin/citrea/tests/bitcoin/sequencer_commitments.rs @@ -22,7 +22,6 @@ use rs_merkle::MerkleTree; use sov_ledger_rpc::LedgerRpcClient; use sov_rollup_interface::da::{BlobReaderTrait, DaTxRequest, DataOnDa, SequencerCommitment}; use sov_rollup_interface::rpc::SequencerCommitmentResponse; -use sov_rollup_interface::services::da::DaService; use tokio::time::sleep; use super::get_citrea_path; @@ -371,7 +370,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { index: 1, }; da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -384,7 +383,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { index: 2, }; da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .unwrap(); // Restart sequencer, it should fetch commitment with index 1 and 2 diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index 0350dca788..dcfb416170 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -584,7 +584,7 @@ pub async fn generate_mock_txs( valid_method_ids.push(method_id.clone()); da_service - .send_transaction(DaTxRequest::BatchProofMethodId(method_id)) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(method_id)) .await .expect("Failed to send transaction"); @@ -596,7 +596,7 @@ pub async fn generate_mock_txs( seq_index += 1; valid_commitments.push(commitment.clone()); da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .expect("Failed to send transaction"); @@ -608,7 +608,7 @@ pub async fn generate_mock_txs( seq_index += 1; valid_commitments.push(commitment.clone()); da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + 
.send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .expect("Failed to send transaction"); @@ -617,7 +617,7 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + .send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); @@ -627,13 +627,13 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + .send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); // Sequencer commitment with wrong tx prefix wrong_prefix_da_service - .send_transaction(DaTxRequest::SequencerCommitment(SequencerCommitment { + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(SequencerCommitment { merkle_root: [15; 32], index: seq_index, l2_end_block_number: 1268, @@ -646,13 +646,13 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + .send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); // Sequencer commitment with wrong key and signature wrong_key_da_service - .send_transaction(DaTxRequest::SequencerCommitment(SequencerCommitment { + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(SequencerCommitment { merkle_root: [15; 32], index: seq_index, l2_end_block_number: 1268, @@ -667,7 +667,7 @@ pub async fn generate_mock_txs( }; valid_commitments.push(commitment.clone()); da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .expect("Failed to send transaction"); @@ -677,7 +677,7 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + .send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); @@ -704,7 +704,7 @@ pub async fn generate_mock_txs( }; valid_method_ids.push(method_id.clone()); da_service - .send_transaction(DaTxRequest::BatchProofMethodId(method_id)) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(method_id)) .await .expect("Failed to send transaction"); diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs index 1e0845ad15..16d823fe9b 100644 --- a/crates/batch-prover/src/prover.rs +++ b/crates/batch-prover/src/prover.rs @@ -164,9 +164,12 @@ where /// * `shutdown_signal` - A signal to gracefully shut down the prover service #[instrument(name = "BatchProver", skip_all)] pub async fn run(mut self, mut shutdown_signal: GracefulShutdown) { + println!("recovering session"); self.recover_proving_sessions(self.prover_config.enable_recovery) .await; + println!("recovered proving session"); + 'run_loop: loop { select! 
{ biased; @@ -706,15 +709,15 @@ where // start watching the proving jobs to finish in the background tokio::spawn(async move { - while let Some((job_id, rx)) = proving_jobs.recv().await { + while let Some((proving_job_id, rx)) = proving_jobs.recv().await { let proof_with_duration = rx.await.expect("Proof channel should never close"); info!( "Proving job finished {}, took {:?} seconds", - job_id, proof_with_duration.duration + proving_job_id, proof_with_duration.duration ); let output = extract_proof_output::( - &job_id, + &proving_job_id, &proof_with_duration.proof, &code_commitments_by_spec, network, @@ -722,7 +725,11 @@ where // stores proof and marks job as waiting for da ledger_db - .put_proof_by_job_id(job_id, proof_with_duration.proof.clone(), output.into()) + .put_proof_by_job_id( + proving_job_id, + proof_with_duration.proof.clone(), + output.into(), + ) .expect("Should put proof to db"); // Record the proving time metric @@ -735,16 +742,29 @@ where // submit the proof to the DA service in the background tokio::spawn(async move { - let txid = prover_service + let (da_job_id, rx) = prover_service .submit_proof(proof_with_duration.proof) .await .expect("Failed to submit proof"); - info!("Job {} proof submitted to DA", job_id); + println!( + "Job {} proof submitted to DA. Da job id {da_job_id}", + proving_job_id + ); + + ledger_db + .set_proving_job_da_job_id(proving_job_id, da_job_id) + .expect("Failed to save da job by id"); + + // Todo handle da job sending failure + let txid = rx + .await + .expect("Da job channel should never close") + .unwrap(); - // // stores tx id and removes job from pending da submission + // stores tx id and removes job from pending da submission ledger_db - .finalize_proving_job(job_id, txid.into()) + .finalize_proving_job(proving_job_id, txid.into()) .expect("Should update proving job tx id"); }); } @@ -805,6 +825,7 @@ where .ledger_db .get_pending_l1_submission_jobs() .expect("Should get pending l1 jobs"); + for job_id in job_ids { if let hash_map::Entry::Vacant(entry) = proofs.entry(job_id) { let stored_proof = self @@ -821,23 +842,48 @@ where } // submit all proofs to da - for (job_id, proof) in proofs { + for (proving_job_id, proof) in proofs { let prover_service = self.prover_service.clone(); - let _ledger_db = self.ledger_db.clone(); - info!("Submitting recovered proof for job {}", job_id); - // submit in the background - tokio::spawn(async move { - let _id = prover_service + let ledger_db = self.ledger_db.clone(); + info!("Submitting recovered proof for job {}", proving_job_id); + + // Recovery on-going in progress proof on DA + let rx = if let Some(da_job_id) = ledger_db + .get_proving_job_da_job_id(proving_job_id) + .expect("DB call shouldn't fail") + { + info!( + "DA job {} already exists for proving job {}", + da_job_id, proving_job_id + ); + prover_service + .wait_for_existing_da_job(da_job_id) + .await + .expect("Should recover da job receiver") + } else { + // No on going da job, submit a new one + let (da_job_id, rx) = prover_service .submit_proof(proof) .await - .expect("Failed to submit transaction"); - info!("Recovered Job {} proof sent to DA", job_id); - - // // stores tx id and removes job from pending da submission - // ledger_db - // .finalize_proving_job(job_id, tx_id.into()) - // .expect("Should update proving job tx id"); - // info!("Finalized recovered proving job: {}", job_id); + .expect("Failed to submit proof"); + + ledger_db + .set_proving_job_da_job_id(proving_job_id, da_job_id) + .expect("Failed to set da job_id"); + 
info!("Recovered Job {} proof sent to DA", proving_job_id); + rx + }; + + // submit in the background + tokio::spawn(async move { + // TODO handle failure + let txid = rx.await.unwrap().expect("Failed to submit transaction"); + + // stores tx id and removes job from pending da submission + ledger_db + .finalize_proving_job(proving_job_id, txid.into()) + .expect("Should update proving job tx id"); + info!("Finalized recovered proving job: {}", proving_job_id); }); } } diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index 3b67ee1848..5c2c669684 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -479,18 +479,16 @@ where let receipt = InnerReceipt::Fake(fake_receipt); let proof = bincode::serialize(&receipt).expect("Receipt serialization cannot fail"); - let job_id = self + let (_, rx) = self .context .da_service .send_transaction(DaTxRequest::ZKProof(proof.clone())) .await .map_err(internal_rpc_error)?; - let txid = self - .context - .da_service - .wait_for_completion(job_id, None) + let txid = rx .await + .map_err(internal_rpc_error)? .map_err(internal_rpc_error)?; Ok(BatchProofResponse { diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs index 5a08e205f0..b93cf6875e 100644 --- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs +++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs @@ -30,7 +30,7 @@ use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD}; #[derive(Debug, Clone, Serialize, Deserialize, borsh::BorshSerialize, borsh::BorshDeserialize)] /// These are real blobs we put on DA. -pub(crate) enum RawTxData { +pub enum RawTxData { /// borsh(DataOnDa::Complete(compress(Proof))) Complete(Vec), /// let compressed = compress(borsh(Proof)) diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 08e33dd138..aa8b37a72f 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -1,4 +1,5 @@ -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; +use std::sync::{Arc, Mutex}; use bitcoin::hashes::Hash; use bitcoin::{Transaction, Txid}; @@ -6,14 +7,17 @@ use serde::{Deserialize, Serialize}; use sov_db::ledger_db::DaLedgerOps; pub use sov_db::schema::types::da_jobs::{Job, JobId, JobStatus}; use sov_db::schema::types::da_jobs::{JobProgress as DbJobProgress, SentChunks as DbSentChunks}; +use tokio::sync::oneshot; use tracing::{info, instrument}; use super::Result; +use crate::error::BitcoinServiceError; use crate::helpers::builders::body_builders::RawTxData; use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; use crate::job::metrics::DA_JOB_METRICS as JM; use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; +use crate::service::TxidWrapper; /// Tracks progress of a job including sent transactions for recovery. 
/// @@ -164,17 +168,28 @@ impl From for DbJobProgress { /// Job service pub struct DaJobService { ledger_db: DB, + job_waiters: Arc< + Mutex< + HashMap>>, + >, + >, } impl DaJobService { /// Creates a new DaJobService with ledger_db pub fn new(ledger_db: DB) -> Self { - Self { ledger_db } + Self { + ledger_db, + job_waiters: Arc::new(Mutex::new(HashMap::new())), + } } /// Create a new job and save to db - #[instrument(level = "trace", skip(self), ret)] - pub fn submit_job(&self, raw_tx_data: RawTxData) -> Result { + pub fn submit_job( + &self, + raw_tx_data: RawTxData, + tx: oneshot::Sender>, + ) -> Result { let job_id = uuid::Uuid::now_v7(); let created_at = get_timestamp(); @@ -188,6 +203,8 @@ impl DaJobService { JM.record_job_submitted(job.data.len()); + self.job_waiters.lock().unwrap().insert(job_id, tx); + info!("Job {job_id} submitted and persisted"); Ok(job_id) } @@ -237,6 +254,7 @@ impl DaJobService { progress: &mut JobProgress, new_status: JobStatus, ) -> Result<()> { + let job_id = progress.job_id; let previous_status = progress.status.clone(); progress.status = new_status; @@ -248,6 +266,8 @@ impl DaJobService { JM.record_status_update(&previous_status, progress); + self.notify_new_status(job_id, progress); + Ok(()) } @@ -281,6 +301,36 @@ impl DaJobService { Ok(!in_progress_jobs.is_empty()) } + + fn notify_new_status(&self, job_id: JobId, progress: &JobProgress) { + let result = match &progress.status { + JobStatus::Completed => { + if let Some(last_tx) = progress.sent_chunks.reveal_txs.last() { + Ok(TxidWrapper(last_tx.compute_txid())) + } else { + Err(JobServiceError::NoTransactionsFound(job_id).into()) + } + } + JobStatus::Cancelled => Err(JobServiceError::JobCancelled(job_id).into()), + JobStatus::Failed { error } => { + Err(JobServiceError::JobFailed(job_id, error.clone()).into()) + } + JobStatus::Pending | JobStatus::InProgress => return, + }; + + if let Some(tx) = self.job_waiters.lock().unwrap().remove(&job_id) { + println!("removing tx send"); + let _ = tx.send(result); + } + } + + pub(crate) fn insert_waiter( + &self, + job_id: JobId, + waiter: oneshot::Sender>, + ) { + self.job_waiters.lock().unwrap().insert(job_id, waiter); + } } /// Implementation of RPC provider methods @@ -320,9 +370,11 @@ impl DaJobRpcProvider for DaJobService { let raw_data: RawTxData = borsh::from_slice(&original_job.data)?; + let (tx, _rx) = oneshot::channel(); // Create new job with same data - let new_job_id = self.submit_job(raw_data)?; + let new_job_id = self.submit_job(raw_data, tx)?; tracing::info!("Job {job_id} retried as new job {new_job_id}"); + Ok(new_job_id) } JobStatus::Pending | JobStatus::InProgress | JobStatus::Completed => { diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 2158d21651..dc4189f260 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -10,7 +10,6 @@ use std::collections::{HashMap, HashSet}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; -use std::time::Instant; use anyhow::anyhow; use async_trait::async_trait; @@ -30,15 +29,16 @@ use lru::LruCache; use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::LedgerDB; -use sov_db::schema::types::da_jobs::{JobId, JobStatus}; +use sov_db::schema::types::da_jobs::JobStatus; use sov_rollup_interface::da::{DaSpec, DaTxRequest, DataOnDa, SequencerCommitment}; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::zk::Proof; use sov_rollup_interface::Network; 
use tokio::select; use tokio::sync::mpsc::UnboundedReceiver; -use tokio::sync::Mutex; +use tokio::sync::{oneshot, Mutex}; use tracing::{debug, error, info, instrument, trace, warn}; +use uuid::Uuid; use crate::error::{BitcoinServiceError, MempoolRejection}; use crate::fee::{validate_txs_fee_rate, BumpFeeMethod, FeeService}; @@ -1339,7 +1339,11 @@ impl DaService for BitcoinService { } /// Submit a new job to the queue - async fn send_transaction(&self, tx_request: DaTxRequest) -> Result { + async fn send_transaction( + &self, + tx_request: DaTxRequest, + ) -> Result<(Uuid, oneshot::Receiver>)> { + let (tx, rx) = oneshot::channel(); let job_id = { let job_service = self.job_service.lock().await; @@ -1350,53 +1354,52 @@ impl DaService for BitcoinService { return Err(BitcoinServiceError::PreviousJobInProgress); } } - job_service.submit_job(tx_request.try_into()?)? + job_service.submit_job(tx_request.try_into()?, tx)? }; + // TODO maybe single job handling here self.process_job_service().await?; - Ok(job_id) + Ok((job_id, rx)) } - /// Wait for job completion by job_id and returns the txid - async fn wait_for_completion( + async fn recover_existing_job( &self, - job_id: JobId, - timeout: Option, - ) -> Result { - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(600)); // Defaults to 10min - - loop { - if start.elapsed() > timeout { - return Err(JobServiceError::JobTimeout(job_id, timeout.as_secs()).into()); - } - - let progress = self - .job_service - .lock() - .await - .get_progress(&job_id)? - .ok_or(JobServiceError::JobNotFound(job_id))?; - - match progress.status { - JobStatus::Completed => { - if let Some(last_reveal) = progress.sent_chunks.reveal_txs.last() { - return Ok(TxidWrapper(last_reveal.compute_txid())); - } - return Err(JobServiceError::NoTransactionsFound(job_id).into()); - } - JobStatus::Failed { error, .. } => { - return Err(JobServiceError::JobFailed(job_id, error).into()); - } - JobStatus::Cancelled => { - return Err(JobServiceError::JobCancelled(job_id).into()); - } - _ => { - tokio::time::sleep(Duration::from_millis(500)).await; + job_id: Uuid, + ) -> Result>> { + let progress = self + .job_service + .lock() + .await + .get_progress(&job_id)? 
+ .ok_or(JobServiceError::JobNotFound(job_id))?; + + let (tx, rx) = oneshot::channel(); + + match progress.status { + JobStatus::Completed => { + // Job already finished before we subscribed + if let Some(last_tx) = progress.sent_chunks.reveal_txs.last() { + let _ = tx.send(Ok(TxidWrapper(last_tx.compute_txid()))); + } else { + let _ = tx.send(Err(JobServiceError::NoTransactionsFound(job_id).into())); } } + JobStatus::Failed { error } => { + // Job already failed + let _ = tx.send(Err(JobServiceError::JobFailed(job_id, error).into())); + } + JobStatus::Cancelled => { + // Job already cancelled + let _ = tx.send(Err(JobServiceError::JobCancelled(job_id).into())); + } + JobStatus::Pending | JobStatus::InProgress => { + // Job still running, register for notification + self.job_service.lock().await.insert_waiter(job_id, tx); + } } + + Ok(rx) } #[instrument(level = "trace", skip(self))] diff --git a/crates/bitcoin-da/src/test_utils.rs b/crates/bitcoin-da/src/test_utils.rs index e71c314eb0..7bf3348e59 100644 --- a/crates/bitcoin-da/src/test_utils.rs +++ b/crates/bitcoin-da/src/test_utils.rs @@ -2,6 +2,8 @@ use bitcoin::hashes::Hash; use sov_rollup_interface::da::{DaTxRequest, DataOnDa}; +use sov_rollup_interface::services::da::DaService; +use uuid::Uuid; use crate::error::BitcoinServiceError; use crate::helpers::builders::body_builders::{DaTxs, RawTxData}; @@ -11,6 +13,14 @@ use crate::helpers::builders::test_utils::{ use crate::service::{split_proof, BitcoinService, Result}; impl BitcoinService { + /// Send a transaction to da and wait until its completion + pub async fn send_transaction_and_wait(&self, tx_request: DaTxRequest) -> Result { + let (job_id, rx) = self.send_transaction(tx_request).await?; + println!("job_id : {:?}", job_id); + rx.await??; + Ok(job_id) + } + /// Sends chunks and aggregate as if they are of a Complete kind. pub async fn test_send_separate_chunk_transaction_with_fee_rate( &self, diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs index 18b3fafbfc..fad7efcf69 100644 --- a/crates/prover-services/src/parallel.rs +++ b/crates/prover-services/src/parallel.rs @@ -207,17 +207,14 @@ where pub async fn submit_proof( &self, proof: Proof, - ) -> anyhow::Result<::TransactionId> { + ) -> anyhow::Result<( + Uuid, + oneshot::Receiver::TransactionId, ::Error>>, + )> { let tx_request = DaTxRequest::ZKProof(proof); info!("Submitting proof to DA service"); - let job_id = self - .da_service - .send_transaction(tx_request) - .await - .map_err(|e| anyhow::anyhow!(e))?; - self.da_service - .wait_for_completion(job_id, None) + .send_transaction(tx_request) .await .map_err(|e| anyhow::anyhow!(e)) } @@ -237,6 +234,17 @@ where let vm = self.vm.clone(); vm.start_session_recovery() } + + /// Used for recovery + pub async fn wait_for_existing_da_job( + &self, + da_job_id: Uuid, + ) -> Result< + oneshot::Receiver::TransactionId, ::Error>>, + ::Error, + > { + self.da_service.recover_existing_job(da_job_id).await + } } /// Runs the zkVM proving session. Decides on whether to produce a real proof or a fake proof based on the proof mode. 
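Taken together, the hunks above replace the old submit-then-poll flow (send_transaction followed by wait_for_completion) with a submit-then-notify contract: send_transaction now returns the job id together with a oneshot::Receiver that resolves once the DA job reaches a terminal status, and recover_existing_job lets a restarted caller re-subscribe by job id. The sketch below distills that contract into a self-contained program; MockDa, Waiters, and notify are illustrative stand-ins written for this note, not types introduced by the patch.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use tokio::sync::oneshot;
use uuid::Uuid; // assumes the "v7" feature, as the patch's now_v7() does

type TxId = [u8; 32];
type Waiters = Arc<Mutex<HashMap<Uuid, oneshot::Sender<Result<TxId, String>>>>>;

struct MockDa {
    waiters: Waiters,
}

impl MockDa {
    // Register a waiter and hand back (job_id, receiver), mirroring the
    // shape of the patched DaService::send_transaction.
    fn send_transaction(&self) -> (Uuid, oneshot::Receiver<Result<TxId, String>>) {
        let (tx, rx) = oneshot::channel();
        let job_id = Uuid::now_v7();
        self.waiters.lock().unwrap().insert(job_id, tx);
        (job_id, rx)
    }

    // Invoked by whatever drives the job once it reaches a terminal status,
    // playing the role notify_new_status plays in the patch.
    fn notify(&self, job_id: Uuid, result: Result<TxId, String>) {
        if let Some(tx) = self.waiters.lock().unwrap().remove(&job_id) {
            let _ = tx.send(result); // receiver may already be dropped; ignore
        }
    }
}

#[tokio::main]
async fn main() {
    let da = MockDa { waiters: Arc::new(Mutex::new(HashMap::new())) };
    let (job_id, rx) = da.send_transaction();

    // In the real service a background task completes the job; notifying
    // inline keeps this example deterministic.
    da.notify(job_id, Ok([0u8; 32]));

    let txid = rx.await.expect("sender not dropped").expect("job succeeded");
    println!("job {job_id} confirmed with txid {txid:?}");
}

The key property is that the waiter map owns the Sender: whichever task drives the job to a terminal status delivers the result exactly once, callers that only want fire-and-forget can simply drop the Receiver, and a caller that crashed can obtain a fresh Receiver later instead of polling the database.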
diff --git a/crates/sequencer/src/commitment/service.rs b/crates/sequencer/src/commitment/service.rs index 0fc6b301e0..59690e3c3e 100644 --- a/crates/sequencer/src/commitment/service.rs +++ b/crates/sequencer/src/commitment/service.rs @@ -221,7 +221,7 @@ where let tx_request = DaTxRequest::SequencerCommitment(commitment.clone()); - let job_id = self + let (_, rx) = self .da_service .send_transaction(tx_request) .await @@ -235,11 +235,10 @@ where let start = Instant::now(); let ledger_db = self.ledger_db.clone(); - let _txid = self - .da_service - .wait_for_completion(job_id, None) + let _txid = rx .await - .map_err(|e| anyhow!(e))?; + .map_err(|_| anyhow!("DA notification channel closed"))? // Handle RecvError + .map_err(|e| anyhow!("DA job failed: {e}"))?; SM.send_commitment_execution.record( Instant::now() diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index 6a249c2ee4..4ffd9c59d2 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -10,7 +10,7 @@ use sov_rollup_interface::da::{ }; use sov_rollup_interface::services::da::{DaService, SlotData}; use sov_rollup_interface::zk::Proof; -use tokio::sync::{broadcast, Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; +use tokio::sync::{broadcast, oneshot, Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use tokio::time; use tracing::instrument::Instrument; use uuid::Uuid; @@ -427,7 +427,16 @@ impl DaService for MockDaService { } #[tracing::instrument(name = "MockDA", level = "debug", skip_all)] - async fn send_transaction(&self, tx_request: DaTxRequest) -> Result { + async fn send_transaction( + &self, + tx_request: DaTxRequest, + ) -> Result< + ( + Uuid, + oneshot::Receiver>, + ), + Self::Error, + > { let blob = match tx_request { DaTxRequest::ZKProof(proof) => { tracing::debug!("Adding a zkproof"); @@ -447,15 +456,10 @@ impl DaService for MockDaService { }; let blocks = self.blocks.lock().await; let _ = self.add_blob(&blocks, blob, Default::default())?; - Ok(Uuid::default()) - } + let (tx, rx) = oneshot::channel(); - async fn wait_for_completion( - &self, - _job_id: Uuid, - _timeout: Option, - ) -> Result { - Ok(MockHash([0; 32])) + let _ = tx.send(Ok(MockHash([0; 32]))); + Ok((Uuid::default(), rx)) } async fn get_fee_rate(&self) -> Result { @@ -491,6 +495,13 @@ impl DaService for MockDaService { height: block.header.height, } } + + async fn recover_existing_job( + &self, + _job_id: Uuid, + ) -> Result>, Self::Error> { + unimplemented!() + } } fn hash_to_array(bytes: &[u8]) -> [u8; 32] { diff --git a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs index 20cd3215ea..c6ab4e1453 100644 --- a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs @@ -1,12 +1,8 @@ //! The da module defines traits used by the full node to interact with the DA layer. - -#[cfg(feature = "native")] -use std::time::Duration; - use serde::de::DeserializeOwned; use serde::Serialize; #[cfg(feature = "native")] -use tokio::sync::oneshot::Sender as OneshotSender; +use tokio::sync::oneshot::{self, Sender as OneshotSender}; #[cfg(feature = "native")] use uuid::Uuid; @@ -107,14 +103,23 @@ pub trait DaService: Send + Sync + 'static { /// Send a transaction directly to the DA layer. /// blob is the serialized and signed transaction. 
/// Returns nothing if the transaction was successfully sent. - async fn send_transaction(&self, tx_request: DaTxRequest) -> Result; - - /// Wait for a job to finish - async fn wait_for_completion( + async fn send_transaction( + &self, + tx_request: DaTxRequest, + ) -> Result< + ( + Uuid, + oneshot::Receiver>, + ), + Self::Error, + >; + + /// Recover an ongoing da job sending session + /// Returns the receiver if available + async fn recover_existing_job( &self, job_id: Uuid, - timeout: Option, - ) -> Result; + ) -> Result>, Self::Error>; /// Returns fee rate per byte on DA layer. async fn get_fee_rate(&self) -> Result; From 72e7d126ed69ddd4446035f45aa77e45eb7a7781 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Thu, 16 Oct 2025 22:44:17 +0100 Subject: [PATCH 41/81] Remove prints --- bin/citrea/tests/bitcoin/da_queue.rs | 10 ---------- bin/citrea/tests/mock/proving.rs | 2 -- crates/batch-prover/src/prover.rs | 8 +------- crates/bitcoin-da/src/job/service.rs | 1 - crates/bitcoin-da/src/test_utils.rs | 1 - 5 files changed, 1 insertion(+), 21 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 9694d53a76..21c48d80c7 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -423,7 +423,6 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { // Fill mempool for i in 1..=3 { - println!("i : {:?}", i); da_service .send_transaction_and_wait(DaTxRequest::ZKProof( verifiable_100kb_batch_proof.clone(), @@ -433,19 +432,15 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { da.wait_mempool_len(8 * i, None).await?; } - println!("11"); - let (job_id, rx) = da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; - println!("22"); // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit // The three first proofs should hit the mempool + 1 chunk da.wait_mempool_len(8 * 3 + 2, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 26); - println!("33"); // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; assert_eq!(monitored_txs.len(), 26); @@ -456,7 +451,6 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await; - println!("44"); assert!(res.is_ok()); // Txs starting from a new chain should be accepted to mempool @@ -465,7 +459,6 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { let monitored_txs = da_service.monitoring.get_monitored_txs().await; assert_eq!(monitored_txs.len(), 34); - println!("55"); // We mine the first three proofs + the 1 chunk pair + the extra proof starting another UTXO chain // and make sure that the remaining chunks and aggregate and sent on next block when mempool size is freed // Assert that all chunks were mined and mempool space is freed @@ -479,7 +472,6 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { assert_eq!(relevant_txs.len(), 17); - println!("66"); // Remaining chunks and aggregate da.wait_mempool_len(6, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 6); @@ -724,7 +716,6 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest { .header .state_root; - println!("1"); self.test_package_mempool_limits( da, &da_service, @@ -736,7 +727,6 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest { ) .await?; - println!("2"); self.test_package_too_large( da, 
&da_service, diff --git a/bin/citrea/tests/mock/proving.rs b/bin/citrea/tests/mock/proving.rs index b5a2e89645..fa0c3c5a06 100644 --- a/bin/citrea/tests/mock/proving.rs +++ b/bin/citrea/tests/mock/proving.rs @@ -356,7 +356,6 @@ async fn test_batch_prover_prove_rpcs() { assert_eq!(job_ids.len(), 1); let job_id = job_ids[0]; - println!("111"); let response = wait_for_prover_job(&prover_client, job_id, None) .await .unwrap(); @@ -378,7 +377,6 @@ async fn test_batch_prover_prove_rpcs() { wait_for_l2_block(&test_client, 8, None).await; wait_for_commitment(&da_service, 6, None).await; - println!("3"); // invoke proving from RPC, since paused, should not start any job let job_ids = prover_client.batch_prover_prove(None).await; assert_eq!(job_ids.len(), 0); diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs index 16d823fe9b..dad755a563 100644 --- a/crates/batch-prover/src/prover.rs +++ b/crates/batch-prover/src/prover.rs @@ -164,12 +164,9 @@ where /// * `shutdown_signal` - A signal to gracefully shut down the prover service #[instrument(name = "BatchProver", skip_all)] pub async fn run(mut self, mut shutdown_signal: GracefulShutdown) { - println!("recovering session"); self.recover_proving_sessions(self.prover_config.enable_recovery) .await; - println!("recovered proving session"); - 'run_loop: loop { select! { biased; @@ -747,10 +744,7 @@ where .await .expect("Failed to submit proof"); - println!( - "Job {} proof submitted to DA. Da job id {da_job_id}", - proving_job_id - ); + info!("Job {provig_job_id} proof submitted to DA. Da job id {da_job_id}"); ledger_db .set_proving_job_da_job_id(proving_job_id, da_job_id) diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index aa8b37a72f..90de39c634 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -319,7 +319,6 @@ impl DaJobService { }; if let Some(tx) = self.job_waiters.lock().unwrap().remove(&job_id) { - println!("removing tx send"); let _ = tx.send(result); } } diff --git a/crates/bitcoin-da/src/test_utils.rs b/crates/bitcoin-da/src/test_utils.rs index 7bf3348e59..fd22981703 100644 --- a/crates/bitcoin-da/src/test_utils.rs +++ b/crates/bitcoin-da/src/test_utils.rs @@ -16,7 +16,6 @@ impl BitcoinService { /// Send a transaction to da and wait until its completion pub async fn send_transaction_and_wait(&self, tx_request: DaTxRequest) -> Result { let (job_id, rx) = self.send_transaction(tx_request).await?; - println!("job_id : {:?}", job_id); rx.await??; Ok(job_id) } From c16aa692b53defca4c5a9cf0379fbe573e275f57 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 17 Oct 2025 09:59:50 +0100 Subject: [PATCH 42/81] Fix build --- crates/batch-prover/src/prover.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs index dad755a563..4471ec279b 100644 --- a/crates/batch-prover/src/prover.rs +++ b/crates/batch-prover/src/prover.rs @@ -744,7 +744,7 @@ where .await .expect("Failed to submit proof"); - info!("Job {provig_job_id} proof submitted to DA. Da job id {da_job_id}"); + info!("Job {proving_job_id} proof submitted to DA. 
Da job id {da_job_id}"); ledger_db .set_proving_job_da_job_id(proving_job_id, da_job_id) From 8d176d232a5b70cb3f77c655ea864143f8e8ecd5 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 17 Oct 2025 10:30:08 +0100 Subject: [PATCH 43/81] Lint --- bin/citrea/tests/bitcoin/da_queue.rs | 2 +- crates/bitcoin-da/src/job/service.rs | 8 +++----- crates/prover-services/src/parallel.rs | 16 +++++----------- 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 21c48d80c7..552c92c927 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -432,7 +432,7 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { da.wait_mempool_len(8 * i, None).await?; } - let (job_id, rx) = da_service + da_service .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 90de39c634..d2c219f477 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -19,6 +19,8 @@ use crate::job::metrics::DA_JOB_METRICS as JM; use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; use crate::service::TxidWrapper; +type JobWaiters = + HashMap>>; /// Tracks progress of a job including sent transactions for recovery. /// /// This state is persisted to the database and updated as transactions @@ -168,11 +170,7 @@ impl From for DbJobProgress { /// Job service pub struct DaJobService { ledger_db: DB, - job_waiters: Arc< - Mutex< - HashMap>>, - >, - >, + job_waiters: Arc>, } impl DaJobService { diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs index fad7efcf69..975ab39680 100644 --- a/crates/prover-services/src/parallel.rs +++ b/crates/prover-services/src/parallel.rs @@ -13,6 +13,9 @@ use uuid::Uuid; use crate::metrics::PARALLEL_PROVER_METRICS; use crate::{ProofData, ProofGenMode, ProofWithDuration}; +type DaJobWaiter = + oneshot::Receiver::TransactionId, ::Error>>; + /// Prover service capable of invoking the zkVM proving sessions in parallel. pub struct ParallelProverService where @@ -204,13 +207,7 @@ where /// Submits the zk proof to the DA service, returning transaction id. 
#[instrument(name = "ParallelProverService", skip_all)] - pub async fn submit_proof( - &self, - proof: Proof, - ) -> anyhow::Result<( - Uuid, - oneshot::Receiver::TransactionId, ::Error>>, - )> { + pub async fn submit_proof(&self, proof: Proof) -> anyhow::Result<(Uuid, DaJobWaiter)> { let tx_request = DaTxRequest::ZKProof(proof); info!("Submitting proof to DA service"); self.da_service @@ -239,10 +236,7 @@ where pub async fn wait_for_existing_da_job( &self, da_job_id: Uuid, - ) -> Result< - oneshot::Receiver::TransactionId, ::Error>>, - ::Error, - > { + ) -> Result, ::Error> { self.da_service.recover_existing_job(da_job_id).await } } From 00af97a10565a017095bb7ec9f3af0f973abc2c2 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 17 Oct 2025 10:35:46 +0100 Subject: [PATCH 44/81] Restore test --- bin/citrea/tests/bitcoin/da_job.rs | 145 ++++++++++++++--------------- 1 file changed, 69 insertions(+), 76 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index d78407ccc5..a9b7d5ac26 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -4,6 +4,7 @@ use std::time::Duration; use alloy_primitives::{U32, U64}; use async_trait::async_trait; use bitcoin::hashes::Hash; +use bitcoin_da::job::rpc::RetryJobResponse; use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter}; use bitcoin_da::job::service::JobStatus; use bitcoin_da::service::BitcoinService; @@ -154,40 +155,36 @@ impl JobServiceTest { let res = rx.await.unwrap(); assert!(res.is_err()); - // // TODO find a way to deterministically wait for retry + let retry_job_response: RetryJobResponse = da_service_client.da_job_retry(job_id).await?; - // let retry_job_response: RetryJobResponse = da_service_client.da_job_retry(job_id).await?; + let old_job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(old_job_by_id.status, JobStatus::Cancelled); - // let old_job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - // assert_eq!(old_job_by_id.status, JobStatus::Cancelled); - - // let new_job_by_id: JobInfoResponse = da_service_client - // .da_job_get_info(retry_job_response.new_job_id) - // .await?; - // assert_eq!(new_job_by_id.status, JobStatus::Pending); - // da.generate(1).await?; + let new_job_by_id: JobInfoResponse = da_service_client + .da_job_get_info(retry_job_response.new_job_id) + .await?; + assert_eq!(new_job_by_id.status, JobStatus::Pending); + da.generate(1).await?; - // // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit - // // The three first proofs should hit the mempool + 1 chunk - // da.wait_mempool_len(18, None).await?; + // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit + // The three first proofs should hit the mempool + 1 chunk + da.wait_mempool_len(18, None).await?; - // assert_eq!(da.get_raw_mempool().await?.len(), 18); + assert_eq!(da.get_raw_mempool().await?.len(), 18); - // let new_job_by_id: JobInfoResponse = da_service_client - // .da_job_get_info(retry_job_response.new_job_id) - // .await?; - // assert_eq!(new_job_by_id.status, JobStatus::InProgress); - // da.generate(1).await?; + let new_job_by_id: JobInfoResponse = da_service_client + .da_job_get_info(retry_job_response.new_job_id) + .await?; + assert_eq!(new_job_by_id.status, JobStatus::InProgress); + da.generate(1).await?; - // let res = da_service - // 
.wait_for_completion(retry_job_response.new_job_id, None) - // .await; - // assert!(res.is_ok()); + // TODO find a way to deterministically wait for retry completion + tokio::time::sleep(Duration::from_secs(3)).await; - // let new_job_by_id: JobInfoResponse = da_service_client - // .da_job_get_info(retry_job_response.new_job_id) - // .await?; - // assert_eq!(new_job_by_id.status, JobStatus::Completed); + let new_job_by_id: JobInfoResponse = da_service_client + .da_job_get_info(retry_job_response.new_job_id) + .await?; + assert_eq!(new_job_by_id.status, JobStatus::Completed); Ok(()) } @@ -285,7 +282,7 @@ impl JobServiceTest { let completed_jobs = da_service_client .da_job_list(Some(JobStatusFilter::Completed), None, None) .await?; - assert_eq!(completed_jobs.len(), 2); + assert_eq!(completed_jobs.len(), 3); Ok(()) } @@ -334,54 +331,50 @@ impl JobServiceTest { assert_eq!(active_jobs_before.len(), 1); assert_eq!(active_jobs_before[0].job_id, job_id); - // TODO handle proper recovery - - // // Send graceful shutdown to da_service and drop da_service - // drop(da_service); - // drop(da_service_client); - // self.task_manager.take().unwrap().graceful_shutdown(); - // sleep(Duration::from_secs(5)).await; - - // // Create a new task_manager as previous was consumed - // self.task_manager = Some(TaskManager::current()); - // let task_executor = self.task_manager.as_ref().unwrap().executor(); - - // let (da_service, da_service_client) = spawn_bitcoin_da_prover_service_with_rpc_server( - // &task_executor, - // &da.config, - // Self::test_config().dir, - // ) - // .await; - - // let job_after: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - - // assert_eq!(job_after.job_id, job_before.job_id); - // assert_eq!(job_after.status, job_before.status); - // assert_eq!(job_after.created_at, job_before.created_at); - // assert_eq!(job_after.sent_count, job_before.sent_count); - - // let active_jobs_after = da_service_client - // .da_job_list(Some(JobStatusFilter::Active), None, None) - // .await?; - // assert_eq!(active_jobs_after.len(), 1); - // assert_eq!(active_jobs_after[0].job_id, job_id); - // assert_eq!(active_jobs_after[0].status, JobStatus::InProgress); - - // da.generate(1).await?; - - // da.wait_mempool_len(6, None).await?; - // let res = da_service.wait_for_completion(job_id, None).await; - // assert!(res.is_ok()); - - // let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - // assert_eq!(completed_job.status, JobStatus::Completed); - // assert_eq!(completed_job.created_at, job_before.created_at); - // assert_eq!(completed_job.error, None); - - // let active_jobs_final = da_service_client - // .da_job_list(Some(JobStatusFilter::Active), None, None) - // .await?; - // assert_eq!(active_jobs_final.len(), 0); + // Send graceful shutdown to da_service and drop da_service + drop(da_service); + drop(da_service_client); + self.task_manager.take().unwrap().graceful_shutdown(); + tokio::time::sleep(Duration::from_secs(5)).await; + + // Create a new task_manager as previous was consumed + self.task_manager = Some(TaskManager::current()); + let task_executor = self.task_manager.as_ref().unwrap().executor(); + + let (_, da_service_client) = spawn_bitcoin_da_prover_service_with_rpc_server( + &task_executor, + &da.config, + Self::test_config().dir, + ) + .await; + + let job_after: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + + assert_eq!(job_after.job_id, job_before.job_id); + assert_eq!(job_after.status, job_before.status); + 
assert_eq!(job_after.created_at, job_before.created_at); + assert_eq!(job_after.sent_count, job_before.sent_count); + + let active_jobs_after = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_after.len(), 1); + assert_eq!(active_jobs_after[0].job_id, job_id); + assert_eq!(active_jobs_after[0].status, JobStatus::InProgress); + + da.generate(1).await?; + + da.wait_mempool_len(6, None).await?; + + let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(completed_job.status, JobStatus::Completed); + assert_eq!(completed_job.created_at, job_before.created_at); + assert_eq!(completed_job.error, None); + + let active_jobs_final = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_final.len(), 0); Ok(()) } From 54fa3e8e2c5704bfb0ca2e39bd0ecf965b6362fa Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 17 Oct 2025 10:36:38 +0100 Subject: [PATCH 45/81] Lint --- bin/citrea/tests/bitcoin/da_job.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index a9b7d5ac26..ffe21f0c52 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -4,8 +4,7 @@ use std::time::Duration; use alloy_primitives::{U32, U64}; use async_trait::async_trait; use bitcoin::hashes::Hash; -use bitcoin_da::job::rpc::RetryJobResponse; -use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter}; +use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter, RetryJobResponse}; use bitcoin_da::job::service::JobStatus; use bitcoin_da::service::BitcoinService; use bitcoincore_rpc::RpcApi; From ff7b77936f24680ff042d519d1bec294934b2d76 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 17 Oct 2025 13:23:40 +0100 Subject: [PATCH 46/81] Remove RPC todo --- crates/bitcoin-da/src/job/mod.rs | 1 - crates/bitcoin-da/src/job/rpc.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/bitcoin-da/src/job/mod.rs b/crates/bitcoin-da/src/job/mod.rs index 51d4e187ae..faf9fb6f1d 100644 --- a/crates/bitcoin-da/src/job/mod.rs +++ b/crates/bitcoin-da/src/job/mod.rs @@ -8,7 +8,6 @@ use crate::job::error::JobServiceError; /// Job related error types pub mod error; -/// TODO: RPC API pub mod rpc; /// Core job queue implementation and state management pub mod service; diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index ec7565adeb..678826a509 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -1,5 +1,5 @@ //! Provides the RPC interface for the bitcoin-da job da. -//! The namespace for these RPC methods is "da" (Data Availability). +//! The namespace for these RPC methods is `daJob` //! This module defines methods to interact with bitcoin-da jobs, //! including cancelling, retrying and listing jobs. 
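The namespace pinned down above ("daJob") follows jsonrpsee's proc-macro convention, where the namespace is prefixed to each method name on the wire. The trait below is a reduced sketch of how such a surface is declared with the same imports the series uses; the method set and names here are assumptions for illustration, not the crate's actual DaJobRpc definition.

// Sketch only: assumes jsonrpsee with the "macros", "client" and "server"
// features, and uuid with "serde" so Uuid can cross the RPC boundary.
use jsonrpsee::core::RpcResult;
use jsonrpsee::proc_macros::rpc;
use uuid::Uuid;

// With namespace = "daJob", the wire-level method names become
// "daJob_cancel" and "daJob_retry".
#[rpc(client, server, namespace = "daJob")]
pub trait ExampleDaJobRpc {
    /// Cancel a pending or in-progress job.
    #[method(name = "cancel")]
    async fn cancel(&self, job_id: Uuid) -> RpcResult<bool>;

    /// Retry a failed or cancelled job, returning the replacement job id.
    #[method(name = "retry")]
    async fn retry(&self, job_id: Uuid) -> RpcResult<Uuid>;
}

From a declaration like this, jsonrpsee generates an ExampleDaJobRpcServer trait for the service to implement and an ExampleDaJobRpcClient extension trait for HTTP clients, which is the mechanism that lets the tests earlier in the series call methods such as retry and cancel against the spawned RPC server.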
From 6b14d8ec6f650187b77ab20bba033fe8febfd7f9 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Sun, 19 Oct 2025 22:44:11 +0100 Subject: [PATCH 47/81] Store only txid in SentChunks --- bin/citrea/tests/bitcoin/da_job.rs | 22 +- crates/bitcoin-da/src/fee.rs | 174 ++++++++------- .../src/helpers/builders/body_builders.rs | 8 +- .../bitcoin-da/src/helpers/builders/tests.rs | 4 +- crates/bitcoin-da/src/job/error.rs | 6 +- crates/bitcoin-da/src/job/metrics.rs | 25 ++- crates/bitcoin-da/src/job/rpc.rs | 36 ++-- crates/bitcoin-da/src/job/service.rs | 199 +++--------------- crates/bitcoin-da/src/service.rs | 72 ++++--- .../db/sov-db/src/schema/types/da_jobs.rs | 51 +++-- 10 files changed, 265 insertions(+), 332 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index ffe21f0c52..ca273b3867 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -5,7 +5,6 @@ use alloy_primitives::{U32, U64}; use async_trait::async_trait; use bitcoin::hashes::Hash; use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter, RetryJobResponse}; -use bitcoin_da::job::service::JobStatus; use bitcoin_da::service::BitcoinService; use bitcoincore_rpc::RpcApi; use citrea_e2e::bitcoin::{BitcoinNode, DEFAULT_FINALITY_DEPTH}; @@ -16,6 +15,7 @@ use citrea_e2e::Result; use citrea_light_client_prover::rpc::LightClientProverRpcClient; use jsonrpsee::http_client::HttpClient; use reth_tasks::TaskManager; +use sov_db::schema::types::da_jobs::DaJobStatus; use sov_ledger_rpc::LedgerRpcClient; use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; use sov_rollup_interface::services::da::DaService; @@ -89,7 +89,7 @@ impl JobServiceTest { let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - assert_eq!(job_by_id.status, JobStatus::Completed); + assert_eq!(job_by_id.status, DaJobStatus::Completed); assert_eq!(job_by_id.sent_count, 1); assert_eq!(job_by_id.error, None); @@ -137,7 +137,7 @@ impl JobServiceTest { assert_eq!(da.get_raw_mempool().await?.len(), 18); let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - assert_eq!(job_by_id.status, JobStatus::InProgress); + assert_eq!(job_by_id.status, DaJobStatus::InProgress); assert_eq!(job_by_id.sent_count, 9); // 9 commit/reveal pair // Cancel job @@ -145,7 +145,7 @@ impl JobServiceTest { assert!(cancel_job_response.success); let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - assert_eq!(job_by_id.status, JobStatus::Cancelled); + assert_eq!(job_by_id.status, DaJobStatus::Cancelled); // Mine sent txs da.generate(1).await?; @@ -157,12 +157,12 @@ impl JobServiceTest { let retry_job_response: RetryJobResponse = da_service_client.da_job_retry(job_id).await?; let old_job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - assert_eq!(old_job_by_id.status, JobStatus::Cancelled); + assert_eq!(old_job_by_id.status, DaJobStatus::Cancelled); let new_job_by_id: JobInfoResponse = da_service_client .da_job_get_info(retry_job_response.new_job_id) .await?; - assert_eq!(new_job_by_id.status, JobStatus::Pending); + assert_eq!(new_job_by_id.status, DaJobStatus::Pending); da.generate(1).await?; // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit @@ -174,7 +174,7 @@ impl JobServiceTest { let new_job_by_id: JobInfoResponse = da_service_client .da_job_get_info(retry_job_response.new_job_id) .await?; - 
assert_eq!(new_job_by_id.status, JobStatus::InProgress); + assert_eq!(new_job_by_id.status, DaJobStatus::InProgress); da.generate(1).await?; // TODO find a way to deterministically wait for retry completion @@ -183,7 +183,7 @@ impl JobServiceTest { let new_job_by_id: JobInfoResponse = da_service_client .da_job_get_info(retry_job_response.new_job_id) .await?; - assert_eq!(new_job_by_id.status, JobStatus::Completed); + assert_eq!(new_job_by_id.status, DaJobStatus::Completed); Ok(()) } @@ -321,7 +321,7 @@ impl JobServiceTest { let job_before: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; assert_eq!(job_before.job_id, job_id); - assert_eq!(job_before.status, JobStatus::InProgress); + assert_eq!(job_before.status, DaJobStatus::InProgress); assert_eq!(job_before.sent_count, 9); let active_jobs_before = da_service_client @@ -359,14 +359,14 @@ impl JobServiceTest { .await?; assert_eq!(active_jobs_after.len(), 1); assert_eq!(active_jobs_after[0].job_id, job_id); - assert_eq!(active_jobs_after[0].status, JobStatus::InProgress); + assert_eq!(active_jobs_after[0].status, DaJobStatus::InProgress); da.generate(1).await?; da.wait_mempool_len(6, None).await?; let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; - assert_eq!(completed_job.status, JobStatus::Completed); + assert_eq!(completed_job.status, DaJobStatus::Completed); assert_eq!(completed_job.created_at, job_before.created_at); assert_eq!(completed_job.error, None); diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs index 272ec9ebcf..973e7861b9 100644 --- a/crates/bitcoin-da/src/fee.rs +++ b/crates/bitcoin-da/src/fee.rs @@ -5,16 +5,18 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use bitcoin::{Amount, Network, Sequence, Transaction, Txid}; +use bitcoin::hashes::Hash; +use bitcoin::{Amount, Network, Sequence, Txid}; use bitcoincore_rpc::json::{ BumpFeeResult, CreateRawTransactionInput, EstimateMode, WalletCreateFundedPsbtOptions, }; use bitcoincore_rpc::{Client, RpcApi}; +use sov_db::schema::types::da_jobs::SentChunks; use thiserror::Error; use tracing::{debug, instrument, trace, warn}; use crate::error::BitcoinServiceError; -use crate::job::service::SentChunks; +use crate::helpers::builders::TxWithId; use crate::monitoring::{MonitoredTx, MonitoredTxKind}; use crate::spec::utxo::UTXO; use crate::tx_signer::SignedTxPair; @@ -232,6 +234,102 @@ impl FeeService { pub fn get_next_fee_rate_multiplier(&self, multiplier: f64) -> f64 { (multiplier * FEE_RATE_MULTIPLIER_FACTOR).min(MAX_FEE_RATE_MULTIPLIER) } + + pub(crate) async fn validate_txs_fee_rate( + &self, + txs: &[SignedTxPair], + sent_chunks: &SentChunks, + fee_rate: u64, + utxos: Vec, + prev_utxo: Option, + ) -> std::result::Result<(), BitcoinServiceError> { + let mut utxo_map = utxos + .into_iter() + .map(|utxo| ((utxo.tx_id, utxo.vout), Amount::from_sat(utxo.amount))) + .collect::>(); + if let Some(prev_utxo) = prev_utxo { + utxo_map.insert( + (prev_utxo.tx_id, prev_utxo.vout), + Amount::from_sat(prev_utxo.amount), + ); + } + + // Recover sent chunks + let mut commit_txs: Vec = vec![]; + for tx in &sent_chunks.commit_txs { + let id = Txid::from_byte_array(*tx); + let tx = self + .client + .get_transaction(&id, None) + .await? + .transaction()?; + commit_txs.push(TxWithId { tx, id }); + } + let mut reveal_txs: Vec = vec![]; + for tx in &sent_chunks.reveal_txs { + let id = Txid::from_byte_array(*tx); + let tx = self + .client + .get_transaction(&id, None) + .await? 
+ .transaction()?; + reveal_txs.push(TxWithId { tx, id }); + } + + // Add sent chunks as available inputs + let get_tx_outputs = |txs: &[TxWithId]| { + txs.iter() + .flat_map(|tx| { + let txid = tx.id; + tx.tx + .output + .iter() + .enumerate() + .map(move |(idx, out)| ((txid, idx as u32), out.value)) + }) + .collect::>() + }; + utxo_map.extend(get_tx_outputs(&commit_txs)); + utxo_map.extend(get_tx_outputs(&reveal_txs)); + + for tx in txs { + // Validate commit + let commit_tx = &tx.commit.tx; + let input_amount: Amount = commit_tx + .input + .iter() + .flat_map(|input| { + utxo_map + .get(&(input.previous_output.txid, input.previous_output.vout)) + .cloned() + }) + .sum(); + let output_amount = commit_tx.output.iter().map(|tx| tx.value).sum(); + + if (input_amount - output_amount) < Amount::from_sat(commit_tx.vsize() as u64) { + return Err(BitcoinServiceError::FeeCalculation(fee_rate)); + } + + // Add commit change output to utxo_map + if let Some(change_output) = commit_tx.output.get(1) { + utxo_map.insert((tx.commit_txid(), 1), change_output.value); + } + + // Validate reveal + let reveal_tx = &tx.reveal.tx; + let input_amount = commit_tx.output[0].value; + let output_amount = reveal_tx.output[0].value; + + // Add reveal utxo to utxo_map, used by chunking txs + utxo_map.insert((tx.reveal_txid(), 0), output_amount); + + if (input_amount - output_amount) < Amount::from_sat(reveal_tx.vsize() as u64) { + return Err(BitcoinServiceError::FeeCalculation(fee_rate)); + } + } + + Ok(()) + } } pub(crate) async fn get_fee_rate_from_mempool_space( @@ -265,78 +363,6 @@ pub(crate) async fn get_fee_rate_from_mempool_space( Ok(Some(fee_rate)) } -pub(crate) fn validate_txs_fee_rate( - txs: &[SignedTxPair], - sent_chunks: &SentChunks, - fee_rate: u64, - utxos: Vec, - prev_utxo: Option, -) -> std::result::Result<(), BitcoinServiceError> { - let mut utxo_map = utxos - .into_iter() - .map(|utxo| ((utxo.tx_id, utxo.vout), Amount::from_sat(utxo.amount))) - .collect::>(); - if let Some(prev_utxo) = prev_utxo { - utxo_map.insert( - (prev_utxo.tx_id, prev_utxo.vout), - Amount::from_sat(prev_utxo.amount), - ); - } - - // Add sent chunks as available inputs - let get_tx_outputs = |txs: &[Transaction]| { - txs.iter() - .flat_map(|tx| { - let txid = tx.compute_txid(); - tx.output - .iter() - .enumerate() - .map(move |(idx, out)| ((txid, idx as u32), out.value)) - }) - .collect::>() - }; - utxo_map.extend(get_tx_outputs(&sent_chunks.commit_txs)); - utxo_map.extend(get_tx_outputs(&sent_chunks.reveal_txs)); - - for tx in txs { - // Validate commit - let commit_tx = &tx.commit.tx; - let input_amount: Amount = commit_tx - .input - .iter() - .flat_map(|input| { - utxo_map - .get(&(input.previous_output.txid, input.previous_output.vout)) - .cloned() - }) - .sum(); - let output_amount = commit_tx.output.iter().map(|tx| tx.value).sum(); - - if (input_amount - output_amount) < Amount::from_sat(commit_tx.vsize() as u64) { - return Err(BitcoinServiceError::FeeCalculation(fee_rate)); - } - - // Add commit change output to utxo_map - if let Some(change_output) = commit_tx.output.get(1) { - utxo_map.insert((tx.commit_txid(), 1), change_output.value); - } - - // Validate reveal - let reveal_tx = &tx.reveal.tx; - let input_amount = commit_tx.output[0].value; - let output_amount = reveal_tx.output[0].value; - - // Add reveal utxo to utxo_map, used by chunking txs - utxo_map.insert((tx.reveal_txid(), 0), output_amount); - - if (input_amount - output_amount) < Amount::from_sat(reveal_tx.vsize() as u64) { - return 
Err(BitcoinServiceError::FeeCalculation(fee_rate)); - } - } - - Ok(()) -} - async fn get_with_timeout( url: T, timeout: Duration, diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs index b93cf6875e..b7683e87d0 100644 --- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs +++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs @@ -23,7 +23,6 @@ use super::{ get_size_reveal, sign_blob_with_private_key, update_witness, TransactionKind, TxWithId, }; use crate::error::BitcoinServiceError; -use crate::job::service::SentChunks; use crate::service::split_proof; use crate::spec::utxo::UTXO; use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD}; @@ -121,7 +120,8 @@ impl DaTxs { #[instrument(level = "trace", skip_all, err)] pub fn create_inscription_transactions( data: RawTxData, - sent_chunks: SentChunks, + previous_commit_chunks: Vec, + previous_reveal_chunks: Vec, da_private_key: SecretKey, prev_utxo: Option, utxos: Vec, @@ -153,8 +153,8 @@ pub fn create_inscription_transactions( reveal_fee_rate, network, &reveal_tx_prefix, - sent_chunks.commit_txs, - sent_chunks.reveal_txs, + previous_commit_chunks, + previous_reveal_chunks, ), RawTxData::BatchProofMethodId(body) => create_inscription_type_3( body, diff --git a/crates/bitcoin-da/src/helpers/builders/tests.rs b/crates/bitcoin-da/src/helpers/builders/tests.rs index d908c169c4..0be0b8d427 100644 --- a/crates/bitcoin-da/src/helpers/builders/tests.rs +++ b/crates/bitcoin-da/src/helpers/builders/tests.rs @@ -11,7 +11,6 @@ use citrea_primitives::compression::{compress_blob, decompress_blob}; use super::body_builders::{DaTxs, RawTxData}; use crate::helpers::builders::sign_blob_with_private_key; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction}; -use crate::job::service::SentChunks; use crate::spec::utxo::UTXO; use crate::REVEAL_OUTPUT_AMOUNT; @@ -510,7 +509,8 @@ fn create_inscription_transactions() { let tx_prefix = &[0u8]; let DaTxs::Complete { commit, reveal } = super::body_builders::create_inscription_transactions( RawTxData::Complete(body.clone()), - SentChunks::default(), + vec![], + vec![], da_private_key, None, utxos.clone(), diff --git a/crates/bitcoin-da/src/job/error.rs b/crates/bitcoin-da/src/job/error.rs index cd0794b6e7..3dcc3d6803 100644 --- a/crates/bitcoin-da/src/job/error.rs +++ b/crates/bitcoin-da/src/job/error.rs @@ -1,4 +1,4 @@ -use sov_db::schema::types::da_jobs::{JobId, JobStatus}; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId}; use thiserror::Error; /// Job errors @@ -34,9 +34,9 @@ pub enum JobServiceError { /// Job cancellation failure #[error("Job {0} cannot be cancelled as it is in status: {1:?}")] - JobCancellationFailure(JobId, JobStatus), + JobCancellationFailure(JobId, DaJobStatus), /// Job retry failure #[error("Job {0} cannot be retried as it is in status: {1:?}")] - JobRetryFailure(JobId, JobStatus), + JobRetryFailure(JobId, DaJobStatus), } diff --git a/crates/bitcoin-da/src/job/metrics.rs b/crates/bitcoin-da/src/job/metrics.rs index 74a1756c8e..2f3989c2fb 100644 --- a/crates/bitcoin-da/src/job/metrics.rs +++ b/crates/bitcoin-da/src/job/metrics.rs @@ -2,10 +2,9 @@ use std::sync::LazyLock; use metrics::{Counter, Gauge, Histogram}; use metrics_derive::Metrics; -use sov_db::schema::types::da_jobs::JobStatus; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobProgress}; use crate::helpers::get_timestamp; -use crate::job::service::JobProgress; /// Defines the metrics being collected for the DA 
job service #[derive(Metrics)] @@ -61,28 +60,28 @@ pub struct DaJobMetrics { } impl DaJobMetrics { - pub fn record_status_update(&self, old_status: &JobStatus, progress: &JobProgress) { + pub fn record_status_update(&self, old_status: &DaJobStatus, progress: &JobProgress) { let new_status = &progress.status; if old_status == new_status { return; } match old_status { - JobStatus::Pending => self.jobs_pending.decrement(1.0), - JobStatus::InProgress => self.jobs_in_progress.decrement(1.0), - JobStatus::Completed => self.jobs_completed.decrement(1.0), - JobStatus::Cancelled => self.jobs_cancelled.decrement(1.0), - JobStatus::Failed { .. } => self.jobs_failed.decrement(1.0), + DaJobStatus::Pending => self.jobs_pending.decrement(1.0), + DaJobStatus::InProgress => self.jobs_in_progress.decrement(1.0), + DaJobStatus::Completed => self.jobs_completed.decrement(1.0), + DaJobStatus::Cancelled => self.jobs_cancelled.decrement(1.0), + DaJobStatus::Failed { .. } => self.jobs_failed.decrement(1.0), } match new_status { - JobStatus::Pending => { + DaJobStatus::Pending => { self.jobs_pending.increment(1.0); } - JobStatus::InProgress => { + DaJobStatus::InProgress => { self.jobs_in_progress.increment(1.0); } - JobStatus::Completed => { + DaJobStatus::Completed => { self.jobs_completed.increment(1.0); self.jobs_completed_total.increment(1); @@ -96,11 +95,11 @@ impl DaJobMetrics { self.job_chunks_sent .record(progress.sent_chunks.count() as f64); } - JobStatus::Cancelled => { + DaJobStatus::Cancelled => { self.jobs_cancelled.increment(1.0); self.jobs_cancelled_total.increment(1); } - JobStatus::Failed { .. } => { + DaJobStatus::Failed { .. } => { self.jobs_failed.increment(1.0); self.jobs_failed_total.increment(1); } diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 678826a509..69d192c387 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -9,9 +9,9 @@ use citrea_common::rpc::utils::internal_rpc_error; use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use serde::{Deserialize, Serialize}; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress}; use super::Result; -use crate::job::service::{JobId, JobProgress, JobStatus}; use crate::service::BitcoinService; /// RPC provider trait for da job service @@ -103,33 +103,33 @@ pub enum JobStatusFilter { impl JobStatusFilter { /// Convert filter to list of status codes to query - pub(super) fn to_job_status(&self) -> Vec { + pub(super) fn to_job_status(&self) -> Vec { match self { - JobStatusFilter::Pending => vec![JobStatus::Pending], - JobStatusFilter::InProgress => vec![JobStatus::InProgress], - JobStatusFilter::Completed => vec![JobStatus::Completed], - JobStatusFilter::Cancelled => vec![JobStatus::Cancelled], + JobStatusFilter::Pending => vec![DaJobStatus::Pending], + JobStatusFilter::InProgress => vec![DaJobStatus::InProgress], + JobStatusFilter::Completed => vec![DaJobStatus::Completed], + JobStatusFilter::Cancelled => vec![DaJobStatus::Cancelled], JobStatusFilter::Failed => { - vec![JobStatus::Failed { + vec![DaJobStatus::Failed { error: Default::default(), }] } JobStatusFilter::Active => { - vec![JobStatus::Pending, JobStatus::InProgress] + vec![DaJobStatus::Pending, DaJobStatus::InProgress] } JobStatusFilter::Terminal => vec![ - JobStatus::Completed, - JobStatus::Cancelled, - JobStatus::Failed { + DaJobStatus::Completed, + DaJobStatus::Cancelled, + DaJobStatus::Failed { error: Default::default(), }, ], JobStatusFilter::All => vec![ - JobStatus::Pending, - 
JobStatus::InProgress, - JobStatus::Completed, - JobStatus::Cancelled, - JobStatus::Failed { + DaJobStatus::Pending, + DaJobStatus::InProgress, + DaJobStatus::Completed, + DaJobStatus::Cancelled, + DaJobStatus::Failed { error: Default::default(), }, ], @@ -144,7 +144,7 @@ pub struct JobInfoResponse { /// Unique job identifier pub job_id: JobId, /// Current job status - pub status: JobStatus, + pub status: DaJobStatus, /// Job creation timestamp pub created_at: u64, /// Last update timestamp @@ -159,7 +159,7 @@ pub struct JobInfoResponse { impl From for JobInfoResponse { fn from(value: JobProgress) -> Self { let error = match &value.status { - JobStatus::Failed { error } => Some(error.clone()), + DaJobStatus::Failed { error } => Some(error.clone()), _ => None, }; diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index d2c219f477..12d3a261c3 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -2,11 +2,9 @@ use std::collections::{HashMap, HashSet}; use std::sync::{Arc, Mutex}; use bitcoin::hashes::Hash; -use bitcoin::{Transaction, Txid}; -use serde::{Deserialize, Serialize}; +use bitcoin::Txid; use sov_db::ledger_db::DaLedgerOps; -pub use sov_db::schema::types::da_jobs::{Job, JobId, JobStatus}; -use sov_db::schema::types::da_jobs::{JobProgress as DbJobProgress, SentChunks as DbSentChunks}; +use sov_db::schema::types::da_jobs::{DaJobStatus, Job, JobId, JobProgress}; use tokio::sync::oneshot; use tracing::{info, instrument}; @@ -21,151 +19,6 @@ use crate::service::TxidWrapper; type JobWaiters = HashMap>>; -/// Tracks progress of a job including sent transactions for recovery. -/// -/// This state is persisted to the database and updated as transactions -/// are sent to bitcoin da. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct JobProgress { - /// Job id as uuidv7 - pub job_id: JobId, - /// Current job status - pub status: JobStatus, - /// Partially sent commit/reveal chunks for partial sending and recovery - pub sent_chunks: SentChunks, - /// Last update timestamp - pub last_updated: u64, -} - -impl JobProgress { - fn new(job_id: JobId, last_updated: u64) -> Self { - Self { - job_id, - status: JobStatus::Pending, - sent_chunks: SentChunks::new(), - last_updated, - } - } -} - -/// Track sent chunk for partial sending and recovery -#[derive(Debug, Default, Clone, Serialize, Deserialize)] -pub struct SentChunks { - /// Sent commit txs - pub commit_txs: Vec, - /// Sent reveal txs - pub reveal_txs: Vec, - /// All sent txids - pub txids: HashSet, -} - -impl SentChunks { - /// Return a default SentChunk with empty vectors - pub fn new() -> Self { - Self::default() - } - - /// Return the number of sent chunks - pub fn count(&self) -> usize { - self.reveal_txs.len() - } - - /// Extend with sent commit and reveal chunks - pub fn extend( - &mut self, - commits: Vec, - reveals: Vec, - txids: Vec, - ) { - self.commit_txs.extend(commits); - self.reveal_txs.extend(reveals); - self.txids.extend(txids); - } -} - -impl From for SentChunks { - fn from(db_chunks: DbSentChunks) -> Self { - let commit_txs = db_chunks - .commit_txs - .iter() - .map(|bytes| { - bitcoin::consensus::deserialize(bytes) - .expect("Failed to deserialize commit transaction from database") - }) - .collect(); - - let reveal_txs = db_chunks - .reveal_txs - .iter() - .map(|bytes| { - bitcoin::consensus::deserialize(bytes) - .expect("Failed to deserialize reveal transaction from database") - }) - .collect(); - - let txids = db_chunks - .txids - .into_iter() - .map(Txid::from_byte_array) - .collect(); - - Self { - commit_txs, - reveal_txs, - txids, - } - } -} - -impl From for DbSentChunks { - fn from(chunks: SentChunks) -> Self { - let commit_txs = chunks - .commit_txs - .iter() - .map(bitcoin::consensus::serialize) - .collect(); - - let reveal_txs = chunks - .reveal_txs - .iter() - .map(bitcoin::consensus::serialize) - .collect(); - - let txids = chunks - .txids - .into_iter() - .map(|tx| tx.to_byte_array()) - .collect(); - - Self { - commit_txs, - reveal_txs, - txids, - } - } -} - -impl From for JobProgress { - fn from(db_progress: DbJobProgress) -> Self { - Self { - job_id: db_progress.job_id, - status: db_progress.status, - sent_chunks: db_progress.sent_chunks.into(), - last_updated: db_progress.last_updated, - } - } -} - -impl From for DbJobProgress { - fn from(progress: JobProgress) -> Self { - Self { - job_id: progress.job_id, - status: progress.status, - sent_chunks: progress.sent_chunks.into(), - last_updated: progress.last_updated, - } - } -} /// Job service pub struct DaJobService { @@ -197,7 +50,7 @@ impl DaJobService { let job = Job::new(job_id, data, created_at); let progress = JobProgress::new(job_id, created_at); - self.ledger_db.submit_job(&job, &progress.into())?; + self.ledger_db.submit_job(&job, &progress)?; JM.record_job_submitted(job.data.len()); @@ -221,7 +74,6 @@ impl DaJobService { self.ledger_db .get_progress(job_id) .map_err(JobServiceError::DatabaseError) - .map(|opt| opt.map(Into::into)) } /// Get all `Pending` and `InProgress` job ids from storage @@ -231,12 +83,12 @@ impl DaJobService { active_jobs.extend( self.ledger_db - .get_job_ids_by_status(JobStatus::Pending.as_u8())?, + .get_job_ids_by_status(DaJobStatus::Pending.as_u8())?, ); active_jobs.extend( self.ledger_db - 
.get_job_ids_by_status(JobStatus::InProgress.as_u8())?, + .get_job_ids_by_status(DaJobStatus::InProgress.as_u8())?, ); // Sort uuidv7 chronologically @@ -250,7 +102,7 @@ impl DaJobService { pub fn update_job_status( &self, progress: &mut JobProgress, - new_status: JobStatus, + new_status: DaJobStatus, ) -> Result<()> { let job_id = progress.job_id; let previous_status = progress.status.clone(); @@ -258,7 +110,7 @@ impl DaJobService { progress.status = new_status; progress.last_updated = get_timestamp(); - let db_progress = progress.clone().into(); + let db_progress = progress.clone(); self.ledger_db .upsert_progress(&db_progress, previous_status.as_u8())?; @@ -279,12 +131,23 @@ impl DaJobService { let active_job_ids = self.get_all_active_job_ids()?; for job_id in active_job_ids { if let Some(JobProgress { - status: JobStatus::InProgress, + status: DaJobStatus::InProgress, sent_chunks, .. }) = self.get_progress(&job_id)? { - txids.extend(sent_chunks.txids); + txids.extend( + sent_chunks + .commit_txs + .into_iter() + .map(Txid::from_byte_array), + ); + txids.extend( + sent_chunks + .reveal_txs + .into_iter() + .map(Txid::from_byte_array), + ); } } @@ -295,25 +158,25 @@ impl DaJobService { pub async fn has_job_in_progress(&self) -> Result { let in_progress_jobs = self .ledger_db - .get_job_ids_by_status(JobStatus::InProgress.as_u8())?; + .get_job_ids_by_status(DaJobStatus::InProgress.as_u8())?; Ok(!in_progress_jobs.is_empty()) } fn notify_new_status(&self, job_id: JobId, progress: &JobProgress) { let result = match &progress.status { - JobStatus::Completed => { + DaJobStatus::Completed => { if let Some(last_tx) = progress.sent_chunks.reveal_txs.last() { - Ok(TxidWrapper(last_tx.compute_txid())) + Ok(TxidWrapper(Txid::from_byte_array(*last_tx))) } else { Err(JobServiceError::NoTransactionsFound(job_id).into()) } } - JobStatus::Cancelled => Err(JobServiceError::JobCancelled(job_id).into()), - JobStatus::Failed { error } => { + DaJobStatus::Cancelled => Err(JobServiceError::JobCancelled(job_id).into()), + DaJobStatus::Failed { error } => { Err(JobServiceError::JobFailed(job_id, error.clone()).into()) } - JobStatus::Pending | JobStatus::InProgress => return, + DaJobStatus::Pending | DaJobStatus::InProgress => return, }; if let Some(tx) = self.job_waiters.lock().unwrap().remove(&job_id) { @@ -340,12 +203,12 @@ impl DaJobRpcProvider for DaJobService { // Only allow cancellation of pending or in-progress jobs match progress.status { - JobStatus::Pending | JobStatus::InProgress => { - self.update_job_status(&mut progress, JobStatus::Cancelled)?; + DaJobStatus::Pending | DaJobStatus::InProgress => { + self.update_job_status(&mut progress, DaJobStatus::Cancelled)?; tracing::info!("Job {job_id} successfully cancelled"); Ok(()) } - JobStatus::Completed | JobStatus::Cancelled | JobStatus::Failed { .. } => Err( + DaJobStatus::Completed | DaJobStatus::Cancelled | DaJobStatus::Failed { .. } => Err( JobServiceError::JobCancellationFailure(job_id, progress.status), ), } @@ -359,7 +222,7 @@ impl DaJobRpcProvider for DaJobService { // Only allow retry of failed or cancelled jobs match progress.status { - JobStatus::Failed { .. } | JobStatus::Cancelled => { + DaJobStatus::Failed { .. } | DaJobStatus::Cancelled => { // Get original job and deserialize data let original_job = self .get_job(&job_id)? 
@@ -374,7 +237,7 @@ impl DaJobRpcProvider for DaJobService { Ok(new_job_id) } - JobStatus::Pending | JobStatus::InProgress | JobStatus::Completed => { + DaJobStatus::Pending | DaJobStatus::InProgress | DaJobStatus::Completed => { Err(JobServiceError::JobRetryFailure(job_id, progress.status)) } } diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index dc4189f260..bef7d75e77 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -29,7 +29,7 @@ use lru::LruCache; use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::LedgerDB; -use sov_db::schema::types::da_jobs::JobStatus; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobProgress, SentChunks}; use sov_rollup_interface::da::{DaSpec, DaTxRequest, DataOnDa, SequencerCommitment}; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::zk::Proof; @@ -41,7 +41,7 @@ use tracing::{debug, error, info, instrument, trace, warn}; use uuid::Uuid; use crate::error::{BitcoinServiceError, MempoolRejection}; -use crate::fee::{validate_txs_fee_rate, BumpFeeMethod, FeeService}; +use crate::fee::{BumpFeeMethod, FeeService}; use crate::helpers::backup::backup_txs_to_file; use crate::helpers::builders::body_builders::{create_inscription_transactions, DaTxs, RawTxData}; use crate::helpers::builders::TxWithId; @@ -49,7 +49,7 @@ use crate::helpers::merkle_tree::BitcoinMerkleTree; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed}; use crate::helpers::{get_timestamp, merkle_tree, TransactionKind}; use crate::job::error::JobServiceError; -use crate::job::service::{DaJobService, JobProgress, SentChunks}; +use crate::job::service::DaJobService; use crate::metrics::BITCOIN_DA_METRICS as BM; use crate::monitoring::{MonitoredTxKind, MonitoringConfig, MonitoringService, TxStatus}; use crate::network_constants::NetworkConstants; @@ -349,12 +349,12 @@ impl BitcoinService { { Ok(completed) => { if completed { - job_service.update_job_status(progress, JobStatus::Completed)?; + job_service.update_job_status(progress, DaJobStatus::Completed)?; info!("Job {} completed successfully", job_id); previous_job_in_progress = false; } else { - job_service.update_job_status(progress, JobStatus::InProgress)?; + job_service.update_job_status(progress, DaJobStatus::InProgress)?; info!("Job {} partially sent", job_id); previous_job_in_progress = true; @@ -372,7 +372,7 @@ impl BitcoinService { error!("Error processing job {}: {:?}", job_id, e); job_service.update_job_status( progress, - JobStatus::Failed { + DaJobStatus::Failed { error: e.to_string(), }, )?; @@ -443,7 +443,7 @@ impl BitcoinService { let utxos = self.get_utxos(sent_txids).await?; let prev_utxo = match &progress.status { - JobStatus::InProgress => None, // Will use previous reveal utxo in create_inscription_type_1 + DaJobStatus::InProgress => None, // Will use previous reveal utxo in create_inscription_type_1 _ => { self.select_prev_utxo(&utxos, previous_job_in_progress) .await? @@ -469,14 +469,15 @@ impl BitcoinService { // Test whether signed_txs should be accepted in queue if !self.test_mempool_accept_queue_tx(&signed_txs).await? 
{ // If it failed on mempool policy limit, it can also fail on meeting min relay fee - // Stateless validation of signed txs fee - validate_txs_fee_rate( - &signed_txs, - &progress.sent_chunks, - fee_sat_per_vbyte, - utxos, - prev_utxo, - )?; + self.fee + .validate_txs_fee_rate( + &signed_txs, + &progress.sent_chunks, + fee_sat_per_vbyte, + utxos, + prev_utxo, + ) + .await?; } // backup to file after mempool acceptance @@ -498,9 +499,8 @@ impl BitcoinService { txids.extend(&ids); progress.sent_chunks.extend( - vec![signed_tx.commit.tx.clone()], - vec![signed_tx.reveal.tx.clone()], - ids, + vec![signed_tx.commit.tx.compute_txid().to_byte_array()], + vec![signed_tx.reveal.tx.compute_txid().to_byte_array()], ); let txs = signed_tx.clone().into_txs_with_id(); @@ -647,12 +647,36 @@ impl BitcoinService { .require_network(network)?; let prefix = self.reveal_tx_prefix.clone(); + + let mut previous_commit_chunks = Vec::new(); + for txid in &sent_chunks.commit_txs { + let txid = Txid::from_byte_array(*txid); + previous_commit_chunks.push( + self.client + .get_transaction(&txid, None) + .await? + .transaction()?, + ) + } + + let mut previous_reveal_chunks = Vec::new(); + for txid in &sent_chunks.reveal_txs { + let txid = Txid::from_byte_array(*txid); + previous_reveal_chunks.push( + self.client + .get_transaction(&txid, None) + .await? + .transaction()?, + ) + } + tokio::task::spawn_blocking(move || { // Since this is CPU bound work, we use spawn_blocking // to release the tokio runtime execution create_inscription_transactions( data, - sent_chunks, + previous_commit_chunks, + previous_reveal_chunks, da_private_key, prev_utxo, utxos, @@ -1377,23 +1401,23 @@ impl DaService for BitcoinService { let (tx, rx) = oneshot::channel(); match progress.status { - JobStatus::Completed => { + DaJobStatus::Completed => { // Job already finished before we subscribed if let Some(last_tx) = progress.sent_chunks.reveal_txs.last() { - let _ = tx.send(Ok(TxidWrapper(last_tx.compute_txid()))); + let _ = tx.send(Ok(TxidWrapper(Txid::from_byte_array(*last_tx)))); } else { let _ = tx.send(Err(JobServiceError::NoTransactionsFound(job_id).into())); } } - JobStatus::Failed { error } => { + DaJobStatus::Failed { error } => { // Job already failed let _ = tx.send(Err(JobServiceError::JobFailed(job_id, error).into())); } - JobStatus::Cancelled => { + DaJobStatus::Cancelled => { // Job already cancelled let _ = tx.send(Err(JobServiceError::JobCancelled(job_id).into())); } - JobStatus::Pending | JobStatus::InProgress => { + DaJobStatus::Pending | DaJobStatus::InProgress => { // Job still running, register for notification self.job_service.lock().await.insert_waiter(job_id, tx); } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index c86cc8d9bc..86ed4d91c5 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -7,7 +7,7 @@ pub type JobId = Uuid; /// Job status representing the current state of transaction processing #[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize, PartialEq)] -pub enum JobStatus { +pub enum DaJobStatus { /// Job is queued and waiting to be processed Pending, /// Job is in progress @@ -23,15 +23,15 @@ pub enum JobStatus { }, } -impl JobStatus { - /// u8 representation of `JobStatus` +impl DaJobStatus { + /// u8 representation of `DaJobStatus` pub fn as_u8(&self) -> u8 { match 
self {
-            JobStatus::Pending => 0,
-            JobStatus::InProgress => 1,
-            JobStatus::Completed => 2,
-            JobStatus::Cancelled => 3,
-            JobStatus::Failed { .. } => 4,
+            DaJobStatus::Pending => 0,
+            DaJobStatus::InProgress => 1,
+            DaJobStatus::Completed => 2,
+            DaJobStatus::Cancelled => 3,
+            DaJobStatus::Failed { .. } => 4,
         }
     }
 }
@@ -39,15 +39,24 @@ impl JobStatus {
 /// Track sent chunk for partial sending and recovery
 #[derive(Debug, Default, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
 pub struct SentChunks {
-    /// Sent commit txs (serialized bitcoin::Transaction)
-    pub commit_txs: Vec<Vec<u8>>,
-    /// Sent reveal txs (serialized bitcoin::Transaction)
-    pub reveal_txs: Vec<Vec<u8>>,
-    /// Sent txids
-    pub txids: Vec<[u8; 32]>,
+    /// Sent commit txids
+    pub commit_txs: Vec<[u8; 32]>,
+    /// Sent reveal txids
+    pub reveal_txs: Vec<[u8; 32]>,
 }
 
 impl SentChunks {
+    /// Number of sent commit/reveal pairs
+    pub fn count(&self) -> usize {
+        self.reveal_txs.len()
+    }
+
+    /// Extend with sent commit and reveal chunks
+    pub fn extend(&mut self, commits: Vec<[u8; 32]>, reveals: Vec<[u8; 32]>) {
+        self.commit_txs.extend(commits);
+        self.reveal_txs.extend(reveals);
+    }
+
     /// Return a default SentChunk with empty vectors
     pub fn new() -> Self {
         Self::default()
@@ -63,13 +72,25 @@ pub struct JobProgress {
     /// Job id as uuidv7
     pub job_id: JobId,
     /// Current job status
-    pub status: JobStatus,
+    pub status: DaJobStatus,
     /// Partially sent commit/reveal chunks for partial sending and recovery
     pub sent_chunks: SentChunks,
     /// Last update timestamp
     pub last_updated: u64,
 }
 
+impl JobProgress {
+    /// Creates a new `JobProgress`
+    pub fn new(job_id: JobId, last_updated: u64) -> Self {
+        Self {
+            job_id,
+            status: DaJobStatus::Pending,
+            sent_chunks: SentChunks::new(),
+            last_updated,
+        }
+    }
+}
+
 /// DA Job representing a transaction to be sent to the DA layer
 #[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
 pub struct Job {

From 542e2ab36183a1464be6e1d4989ba8357d79653b Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Mon, 20 Oct 2025 14:49:09 +0100
Subject: [PATCH 48/81] Cache RawTxData and retrieve proof by id

---
 crates/batch-prover/src/prover.rs             |  6 +-
 crates/bitcoin-da/src/fee.rs                  |  4 +-
 .../src/helpers/builders/body_builders.rs     | 24 +------
 crates/bitcoin-da/src/job/mod.rs              |  4 ++
 crates/bitcoin-da/src/job/service.rs          | 72 +++++++++++++++++--
 crates/bitcoin-da/src/service.rs              |  8 +--
 crates/prover-services/src/parallel.rs        | 14 ++++
 .../adapters/mock-da/src/service.rs           |  3 +
 .../full-node/db/sov-db/src/ledger_db/mod.rs  |  8 +++
 .../db/sov-db/src/ledger_db/traits.rs         |  3 +
 .../rollup-interface/src/state_machine/da.rs  |  3 +
 11 files changed, 111 insertions(+), 38 deletions(-)

diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs
index 4471ec279b..ddade3bb07 100644
--- a/crates/batch-prover/src/prover.rs
+++ b/crates/batch-prover/src/prover.rs
@@ -740,7 +740,7 @@ where
         // submit the proof to the DA service in the background
         tokio::spawn(async move {
             let (da_job_id, rx) = prover_service
-                .submit_proof(proof_with_duration.proof)
+                .submit_proof_by_id(proving_job_id)
                 .await
                 .expect("Failed to submit proof");
 
@@ -836,7 +836,7 @@ where
         }
 
         // submit all proofs to da
-        for (proving_job_id, proof) in proofs {
+        for (proving_job_id, _) in proofs {
             let prover_service = self.prover_service.clone();
             let ledger_db = self.ledger_db.clone();
             info!("Submitting recovered proof for job {}", proving_job_id);
@@ -857,7 +857,7 @@ where
             } else {
                 // No ongoing da job, submit a new one
                 let (da_job_id, rx) = prover_service
-                    .submit_proof(proof)
+                    .submit_proof_by_id(proving_job_id)
                     .await
                     .expect("Failed to submit proof");
 
diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs
index 973e7861b9..63e256da77 100644
--- a/crates/bitcoin-da/src/fee.rs
+++ b/crates/bitcoin-da/src/fee.rs
@@ -255,7 +255,7 @@ impl FeeService {
         }
 
         // Recover sent chunks
-        let mut commit_txs: Vec<TxWithId> = vec![];
+        let mut commit_txs = vec![];
         for tx in &sent_chunks.commit_txs {
             let id = Txid::from_byte_array(*tx);
             let tx = self
@@ -265,7 +265,7 @@
                 .transaction()?;
             commit_txs.push(TxWithId { tx, id });
         }
-        let mut reveal_txs: Vec<TxWithId> = vec![];
+        let mut reveal_txs = vec![];
         for tx in &sent_chunks.reveal_txs {
             let id = Txid::from_byte_array(*tx);
             let tx = self
diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs
index b7683e87d0..c96bf5ac4d 100644
--- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs
+++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs
@@ -15,15 +15,13 @@ use bitcoin::{Address, Amount, Network, Transaction};
 use metrics::histogram;
 use secp256k1::SECP256K1;
 use serde::{Deserialize, Serialize};
-use sov_rollup_interface::da::{DaTxRequest, DataOnDa};
+use sov_rollup_interface::da::DataOnDa;
 use tracing::{info, instrument, trace, warn};
 
 use super::{
     build_commit_transaction, build_control_block, build_reveal_transaction, build_witness,
     get_size_reveal, sign_blob_with_private_key, update_witness, TransactionKind, TxWithId,
 };
-use crate::error::BitcoinServiceError;
-use crate::service::split_proof;
 use crate::spec::utxo::UTXO;
 use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD};
 
@@ -42,26 +40,6 @@ pub enum RawTxData {
     SequencerCommitment(Vec<u8>),
 }
 
-impl TryFrom<DaTxRequest> for RawTxData {
-    type Error = BitcoinServiceError;
-
-    fn try_from(request: DaTxRequest) -> Result<Self, Self::Error> {
-        match request {
-            DaTxRequest::ZKProof(zkproof) => split_proof(zkproof),
-            DaTxRequest::SequencerCommitment(comm) => {
-                let blob = borsh::to_vec(&DataOnDa::SequencerCommitment(comm))
-                    .expect("SequencerCommitment serialize must not fail");
-                Ok(RawTxData::SequencerCommitment(blob))
-            }
-            DaTxRequest::BatchProofMethodId(id) => {
-                let blob = borsh::to_vec(&DataOnDa::BatchProofMethodId(id))
-                    .expect("BatchProofMethodId serialize must not fail");
-                Ok(RawTxData::BatchProofMethodId(blob))
-            }
-        }
-    }
-}
-
 /// This is a list of txs we need to send to DA
 #[derive(Serialize, Clone, Debug)]
 pub enum DaTxs {
diff --git a/crates/bitcoin-da/src/job/mod.rs b/crates/bitcoin-da/src/job/mod.rs
index faf9fb6f1d..0695397b5f 100644
--- a/crates/bitcoin-da/src/job/mod.rs
+++ b/crates/bitcoin-da/src/job/mod.rs
@@ -8,10 +8,14 @@ use crate::job::error::JobServiceError;
 
 /// Job related error types
 pub mod error;
+
+/// Job related RPC endpoints
 pub mod rpc;
+
 /// Core job queue implementation and state management
 pub mod service;
 
+/// Job related metrics
 mod metrics;
 
 type Result<T> = std::result::Result<T, JobServiceError>;
diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs
index 12d3a261c3..f62f67ba3d 100644
--- a/crates/bitcoin-da/src/job/service.rs
+++ b/crates/bitcoin-da/src/job/service.rs
@@ -1,10 +1,14 @@
 use std::collections::{HashMap, HashSet};
+use std::num::NonZeroUsize;
 use std::sync::{Arc, Mutex};
 
+use anyhow::Context;
 use bitcoin::hashes::Hash;
 use bitcoin::Txid;
+use lru::LruCache;
 use sov_db::ledger_db::DaLedgerOps;
 use
sov_db::schema::types::da_jobs::{DaJobStatus, Job, JobId, JobProgress}; +use sov_rollup_interface::da::{DaTxRequest, DataOnDa}; use tokio::sync::oneshot; use tracing::{info, instrument}; @@ -15,7 +19,7 @@ use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; use crate::job::metrics::DA_JOB_METRICS as JM; use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; -use crate::service::TxidWrapper; +use crate::service::{split_proof, TxidWrapper}; type JobWaiters = HashMap>>; @@ -24,28 +28,32 @@ type JobWaiters = pub struct DaJobService { ledger_db: DB, job_waiters: Arc>, + raw_tx_data_cache: Arc>>, } impl DaJobService { /// Creates a new DaJobService with ledger_db - pub fn new(ledger_db: DB) -> Self { + pub fn new(ledger_db: DB, cache_size: Option) -> Self { + let cache_size = cache_size.unwrap_or_else(|| NonZeroUsize::new(10).unwrap()); + Self { ledger_db, job_waiters: Arc::new(Mutex::new(HashMap::new())), + raw_tx_data_cache: Arc::new(Mutex::new(LruCache::new(cache_size))), } } /// Create a new job and save to db pub fn submit_job( &self, - raw_tx_data: RawTxData, + da_tx_request: DaTxRequest, tx: oneshot::Sender>, ) -> Result { let job_id = uuid::Uuid::now_v7(); let created_at = get_timestamp(); // Serialize RawTxData to Vec - let data = borsh::to_vec(&raw_tx_data)?; + let data = borsh::to_vec(&da_tx_request)?; let job = Job::new(job_id, data, created_at); let progress = JobProgress::new(job_id, created_at); @@ -76,6 +84,60 @@ impl DaJobService { .map_err(JobServiceError::DatabaseError) } + /// Get the raw transaction data for a job + /// + /// This function attempts to retrieve the data from cache first. + /// If not found in cache, it deserializes from the job data and + /// transforms it into the appropriate RawTxData format. + /// + /// For StoredProof requests, it retrieves the actual proof from the database + /// using the proof_id reference. 
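+    ///
+    /// A minimal usage sketch (illustrative only; assumes a `job_service` and a
+    /// `job_id` are in scope):
+    ///
+    /// ```ignore
+    /// let job = job_service
+    ///     .get_job(&job_id)?
+    ///     .ok_or(JobServiceError::JobNotFound(job_id))?;
+    /// // First call deserializes the stored DaTxRequest (fetching the proof for
+    /// // StoredProof requests) and fills the LRU cache
+    /// let raw_tx_data = job_service.get_job_data(&job)?;
+    /// // A repeated call for the same job id is served from the cache
+    /// let raw_tx_data_again = job_service.get_job_data(&job)?;
+    /// ```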
+ /// + /// # Arguments + /// + /// * `job` - The job containing serialized DaTxRequest data + /// + /// # Returns + /// + /// * `Result` - The raw transaction data or an error + #[instrument(level = "trace", skip(self), ret)] + pub(crate) fn get_job_data(&self, job: &Job) -> Result { + if let Some(data) = self.raw_tx_data_cache.lock().unwrap().get(&job.id) { + return Ok(data.to_owned()); + }; + + // Deserialize RawTxData from job + let job_data: DaTxRequest = + borsh::from_slice(&job.data).map_err(JobServiceError::SerializationError)?; + + let raw_tx_data = match job_data { + DaTxRequest::ZKProof(zkproof) => split_proof(zkproof), + DaTxRequest::StoredProof(proof_id) => { + // Retrieve proof via secondary index + let zkproof = self.ledger_db.get_proof_by_proof_id(proof_id)?; + split_proof(zkproof) + } + DaTxRequest::SequencerCommitment(comm) => { + let blob = borsh::to_vec(&DataOnDa::SequencerCommitment(comm)) + .expect("SequencerCommitment serialize must not fail"); + Ok(RawTxData::SequencerCommitment(blob)) + } + DaTxRequest::BatchProofMethodId(id) => { + let blob = borsh::to_vec(&DataOnDa::BatchProofMethodId(id)) + .expect("BatchProofMethodId serialize must not fail"); + Ok(RawTxData::BatchProofMethodId(blob)) + } + } + .context("Failed to retrieve RawTxData from DaTxRequest")?; + + self.raw_tx_data_cache + .lock() + .unwrap() + .push(job.id, raw_tx_data.clone()); + + Ok(raw_tx_data) + } + /// Get all `Pending` and `InProgress` job ids from storage #[instrument(level = "trace", skip(self), ret)] pub(crate) fn get_all_active_job_ids(&self) -> Result> { @@ -228,7 +290,7 @@ impl DaJobRpcProvider for DaJobService { .get_job(&job_id)? .ok_or(JobServiceError::JobNotFound(job_id))?; - let raw_data: RawTxData = borsh::from_slice(&original_job.data)?; + let raw_data: DaTxRequest = borsh::from_slice(&original_job.data)?; let (tx, _rx) = oneshot::channel(); // Create new job with same data diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index bef7d75e77..4dea294ac0 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -265,7 +265,7 @@ impl BitcoinService { let utxo_selection_mode = config.utxo_selection_mode.clone().unwrap_or_default(); - let job_service = Mutex::new(DaJobService::new(ledger_db)); + let job_service = Mutex::new(DaJobService::new(ledger_db, None)); let max_fee_rate_sat_to_pay = config .max_fee_rate_sat_to_pay .unwrap_or(DEFAULT_MAX_FEE_RATE_SAT_VB); @@ -337,9 +337,7 @@ impl BitcoinService { .get_progress(&job_id)? .ok_or(JobServiceError::JobNotFound(job_id))?; - // Deserialize RawTxData from job - let job_data: RawTxData = - borsh::from_slice(&job.data).map_err(JobServiceError::SerializationError)?; + let job_data = job_service.get_job_data(&job)?; let sent_txids = job_service.get_pending_chunks()?; @@ -1378,7 +1376,7 @@ impl DaService for BitcoinService { return Err(BitcoinServiceError::PreviousJobInProgress); } } - job_service.submit_job(tx_request.try_into()?, tx)? + job_service.submit_job(tx_request, tx)? }; // TODO maybe single job handling here diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs index 975ab39680..ec08948f4b 100644 --- a/crates/prover-services/src/parallel.rs +++ b/crates/prover-services/src/parallel.rs @@ -216,6 +216,20 @@ where .map_err(|e| anyhow::anyhow!(e)) } + /// Submits the zk proof by id to the DA service, returning transaction id. 
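+    ///
+    /// A minimal usage sketch (illustrative only; `prover_service` and
+    /// `proving_job_id` are assumed to be in scope):
+    ///
+    /// ```ignore
+    /// // Sends DaTxRequest::StoredProof(proving_job_id); the DA side loads the
+    /// // proof from the ledger db instead of carrying the bytes in the request
+    /// let (da_job_id, waiter) = prover_service.submit_proof_by_id(proving_job_id).await?;
+    /// ```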
+ #[instrument(name = "ParallelProverService", skip_all)] + pub async fn submit_proof_by_id( + &self, + proof_id: Uuid, + ) -> anyhow::Result<(Uuid, DaJobWaiter)> { + let tx_request = DaTxRequest::StoredProof(proof_id); + info!("Submitting proof to DA service"); + self.da_service + .send_transaction(tx_request) + .await + .map_err(|e| anyhow::anyhow!(e)) + } + // Only used in tests pub async fn submit_proofs(&self, proofs: Vec) -> anyhow::Result> { let mut tx_and_proof = Vec::with_capacity(proofs.len()); diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index 4ffd9c59d2..97a61c9f10 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -443,6 +443,9 @@ impl DaService for MockDaService { let req = DataOnDa::Complete(proof); borsh::to_vec(&req).unwrap() } + DaTxRequest::StoredProof(_) => { + unimplemented!() + } DaTxRequest::SequencerCommitment(seq_comm) => { tracing::debug!("Adding a sequencer commitment"); let req = DataOnDa::SequencerCommitment(seq_comm); diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index d772d8923a..69286ddeb0 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -2,6 +2,7 @@ use std::ops::RangeInclusive; use std::path::Path; use std::sync::Arc; +use anyhow::Context; use rocksdb::{ReadOptions, WriteBatch}; use sov_rollup_interface::block::L2Block; use sov_rollup_interface::da::SequencerCommitment; @@ -1044,4 +1045,11 @@ impl DaLedgerOps for LedgerDB { } Ok(job_ids) } + + fn get_proof_by_proof_id(&self, proof_id: Uuid) -> anyhow::Result> { + self.db + .get::(&proof_id)? 
+ .map(|stored_batch_proof| stored_batch_proof.proof) + .context("Failed to retrieve proof by id") + } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index ffe09da55a..0bafa4e24e 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -347,6 +347,9 @@ pub trait DaLedgerOps { /// Get all job ids for a specific status fn get_job_ids_by_status(&self, status: u8) -> Result>; + + /// Get stored proof by proof_id + fn get_proof_by_proof_id(&self, proof_id: Uuid) -> Result>; } /// Test ledger operations diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs index bdb68e5aa1..f7180944a1 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs @@ -6,6 +6,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +use uuid::Uuid; use crate::zk::Proof; use crate::{BasicAddress, Network}; @@ -109,6 +110,8 @@ pub enum DaTxRequest { SequencerCommitment(SequencerCommitment), /// Or a zk proof and state diff ZKProof(Proof), + /// Or a job id for a stored proof + StoredProof(Uuid), /// Batch proof method id update for light client BatchProofMethodId(BatchProofMethodId), } From f2482176552356153379e262aeb3357520e2d410 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 20 Oct 2025 17:11:59 +0100 Subject: [PATCH 49/81] Keep DaTxRequest behind native flag --- crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs index f7180944a1..0b299d51c1 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs @@ -6,6 +6,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +#[cfg(feature = "native")] use uuid::Uuid; use crate::zk::Proof; @@ -103,6 +104,7 @@ impl core::cmp::Ord for SequencerCommitment { } /// Transaction request to send to the DA queue. 
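+///
+/// Illustrative construction (sketch; `proof` bytes and a `proving_job_id` are assumed in scope):
+///
+/// ```ignore
+/// // Carry the proof bytes inline
+/// let inline = DaTxRequest::ZKProof(proof);
+/// // Or reference a proof already persisted in the ledger db by its proving job id
+/// let stored = DaTxRequest::StoredProof(proving_job_id);
+/// ```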
+#[cfg(feature = "native")]
 #[allow(clippy::large_enum_variant)]
 #[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize)]
 pub enum DaTxRequest {

From d1974396bd806a6330475d7ea812041ad6249f1a Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 22 Oct 2025 17:06:28 +0100
Subject: [PATCH 50/81] Handle new job processing in select loop

---
 crates/bitcoin-da/src/service.rs | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index 508044d206..5f16fb4682 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -36,7 +36,7 @@ use sov_rollup_interface::zk::Proof;
 use sov_rollup_interface::Network;
 use tokio::select;
 use tokio::sync::mpsc::UnboundedReceiver;
-use tokio::sync::{oneshot, Mutex};
+use tokio::sync::{oneshot, Mutex, Notify};
 use tracing::{debug, error, info, instrument, trace, warn};
 use uuid::Uuid;
 
@@ -189,6 +189,7 @@ pub struct BitcoinService {
     pub(crate) job_service: Mutex<DaJobService<LedgerDB>>,
     max_fee_rate_sat_to_pay: u64,
     fee_rate_cap_duration_secs: u64,
+    job_notifier: Arc<Notify>,
 }
 
 impl BitcoinService {
@@ -224,6 +225,7 @@
             job_service,
             max_fee_rate_sat_to_pay,
             fee_rate_cap_duration_secs,
+            job_notifier: Arc::new(Notify::new()),
         }
     }
 
@@ -312,6 +314,13 @@
                     }
                 }
             }
+
+            _ = self.job_notifier.notified() => {
+                trace!("Job submitted, processing queue");
+                if let Err(e) = self.process_job_service().await {
+                    error!(?e, "Error processing queue on job trigger");
+                }
+            }
         }
     }
 }
@@ -1376,8 +1385,8 @@
             job_service.submit_job(tx_request, tx)?
         };
 
-        // TODO maybe single job handling here
-        self.process_job_service().await?;
+        // For now, notify on new job and process all jobs in order, as this is needed for UTXO handling
+        self.job_notifier.notify_one();
 
         Ok((job_id, rx))
     }

From 28504b36a4c80b1d6b15b136bb8677b183b140cb Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 22 Oct 2025 17:06:52 +0100
Subject: [PATCH 51/81] Add e2e test for batch prover restart recovery

---
 bin/citrea/tests/bitcoin/da_job.rs            | 226 ++++++++++++++++++++++++++++-
 bin/citrea/tests/bitcoin/mod.rs               |   1 +
 bin/citrea/tests/bitcoin/utils.rs             |  58 ++++++++
 crates/batch-prover/Cargo.toml                |   3 +
 crates/batch-prover/src/prover.rs             |   2 +-
 crates/batch-prover/src/rpc.rs                |  56 +++++++
 6 files changed, 343 insertions(+), 3 deletions(-)

diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs
index ca273b3867..282ab7898a 100644
--- a/bin/citrea/tests/bitcoin/da_job.rs
+++ b/bin/citrea/tests/bitcoin/da_job.rs
@@ -1,3 +1,4 @@
+use std::io::Write;
 use std::sync::Arc;
 use std::time::Duration;
 
@@ -7,10 +8,15 @@
 use bitcoin::hashes::Hash;
 use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter, RetryJobResponse};
 use bitcoin_da::service::BitcoinService;
 use bitcoincore_rpc::RpcApi;
+use citrea_batch_prover::rpc::BatchProverRpcClient;
 use citrea_e2e::bitcoin::{BitcoinNode, DEFAULT_FINALITY_DEPTH};
-use citrea_e2e::config::{BitcoinConfig, LightClientProverConfig, TestCaseConfig};
+use citrea_e2e::config::{
+    BatchProverConfig, BitcoinConfig, LightClientProverConfig, TestCaseConfig,
+};
 use citrea_e2e::framework::TestFramework;
+use citrea_e2e::node::BatchProver;
 use citrea_e2e::test_case::{TestCase, TestCaseRunner};
+use citrea_e2e::traits::Restart;
 use citrea_e2e::Result;
 use
citrea_light_client_prover::rpc::LightClientProverRpcClient; use jsonrpsee::http_client::HttpClient; @@ -23,7 +29,10 @@ use sov_rollup_interface::services::da::DaService; use super::get_citrea_path; use crate::bitcoin::full_node::create_serialized_fake_receipt_batch_proof_with_state_roots; use crate::bitcoin::light_client_test::create_random_state_diff; -use crate::bitcoin::utils::spawn_bitcoin_da_prover_service_with_rpc_server; +use crate::bitcoin::utils::{ + create_serialized_fake_receipt_batch_proof_and_serialized_output, + spawn_bitcoin_da_prover_service_with_rpc_server, wait_for_prover_job_count, +}; struct JobServiceTest { task_manager: Option, @@ -565,3 +574,216 @@ async fn test_bitcoin_job_service() -> Result<()> { .run() .await } + +struct BatchProverRecoveryJobServiceTest; + +impl BatchProverRecoveryJobServiceTest { + #[allow(clippy::too_many_arguments)] + async fn test_batch_prover_da_job_recovery( + &mut self, + da: &BitcoinNode, + batch_prover: &mut BatchProver, + genesis_state_root: [u8; 32], + batch_proof_method_id: [u32; 8], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let batch_prover_client = batch_prover.client.http_client().clone(); + + let l1_hash = da.get_block_hash(finalized_height).await?; + // Create 400kb proof that should be chunked and sent over multiple bitcoin blocks + let state_diff_400kb = create_random_state_diff(400); + let (proof, output) = create_serialized_fake_receipt_batch_proof_and_serialized_output( + genesis_state_root, + 20, + batch_proof_method_id, + Some(state_diff_400kb), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + let mut tempfile = tempfile::NamedTempFile::new().unwrap(); + tempfile.write_all(&proof).unwrap(); + + let job_id = batch_prover_client + .submit_proof_from_file(tempfile.path().to_path_buf(), output) + .await?; + + wait_for_prover_job_count(batch_prover, 1, None).await?; + + da.wait_mempool_len(18, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let job_in_progress: JobInfoResponse = batch_prover_client.da_job_get_info(job_id).await?; + assert_eq!(job_in_progress.job_id, job_id); + assert_eq!(job_in_progress.status, DaJobStatus::InProgress); + assert_eq!(job_in_progress.sent_count, 9); + + let active_jobs_before = batch_prover_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_before.len(), 1); + assert_eq!(active_jobs_before[0].job_id, job_id); + + batch_prover.restart(None, None).await?; + + // Assert that restart doesn't create any new job + let active_jobs_after_restart = batch_prover_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_after_restart.len(), 1); + assert_eq!(active_jobs_after_restart[0].job_id, job_id); + + da.generate(1).await?; + + da.wait_mempool_len(6, None).await?; + + let completed_job: JobInfoResponse = batch_prover_client.da_job_get_info(job_id).await?; + assert_eq!(completed_job.status, DaJobStatus::Completed); + assert_eq!(completed_job.error, None); + + let active_jobs_final = batch_prover_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_final.len(), 0); + + Ok(()) + } +} + +#[async_trait] +impl TestCase for BatchProverRecoveryJobServiceTest { + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_full_node: true, + with_sequencer: true, + with_light_client_prover: true, 
+ with_batch_prover: true, + ..Default::default() + } + } + + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-persistmempool=0", + "-walletbroadcast=0", + "-limitancestorcount=100", + "-limitdescendantcount=100", + "-fallbackfee=0.00001", + ], + ..Default::default() + } + } + + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + proof_sampling_number: 99999999, // Prevent prover from proving on its own + ..Default::default() + } + } + + fn scan_l1_start_height() -> Option { + Some(170) + } + + fn light_client_prover_config() -> LightClientProverConfig { + LightClientProverConfig { + initial_da_height: 171, + ..Default::default() + } + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let da = f.bitcoin_nodes.get_mut(0).unwrap(); + let sequencer = f.sequencer.as_mut().unwrap(); + let full_node = f.full_node.as_mut().unwrap(); + let light_client_prover = f.light_client_prover.as_mut().unwrap(); + let batch_prover = f.batch_prover.as_mut().unwrap(); + + let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); + + da.generate(DEFAULT_FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height(None).await?; + + light_client_prover + .wait_for_l1_height(finalized_height, None) + .await?; + + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(U64::from(finalized_height)) + .await?; + let lcp_output = lcp.unwrap().light_client_proof_output; + + let batch_proof_method_ids = light_client_prover + .client + .http_client() + .get_batch_proof_method_ids() + .await?; + let genesis_state_root = lcp_output.l2_state_root; + + // Generate sequencer commitment + for _ in 0..max_l2_blocks_per_commitment { + sequencer.client.send_publish_batch_request().await?; + } + + da.wait_mempool_len(2, None).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height(None).await?; + + full_node + .wait_for_l2_height(max_l2_blocks_per_commitment, None) + .await?; + full_node.wait_for_l1_height(finalized_height, None).await?; + + let commitment = full_node + .client + .http_client() + .get_sequencer_commitment_by_index(U32::from(1)) + .await? + .map(|c| SequencerCommitment { + merkle_root: c.merkle_root, + l2_end_block_number: c.l2_end_block_number.to::(), + index: c.index.to::(), + }) + .unwrap(); + + let commitment_state_root = sequencer + .client + .http_client() + .get_l2_block_by_number(U64::from(commitment.l2_end_block_number)) + .await? 
+            .unwrap()
+            .header
+            .state_root;
+
+        let batch_proof_method_id: [u32; 8] = batch_proof_method_ids[0].method_id.into();
+
+        self.test_batch_prover_da_job_recovery(
+            da,
+            batch_prover,
+            genesis_state_root,
+            batch_proof_method_id,
+            finalized_height,
+            &commitment,
+            commitment_state_root,
+        )
+        .await?;
+
+        Ok(())
+    }
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn test_batch_prover_job_service_recovery() -> Result<()> {
+    TestCaseRunner::new(BatchProverRecoveryJobServiceTest {})
+        .set_citrea_path(get_citrea_path())
+        .run()
+        .await
+}
diff --git a/bin/citrea/tests/bitcoin/mod.rs b/bin/citrea/tests/bitcoin/mod.rs
index 587c8a7cc0..fc4c4988c0 100644
--- a/bin/citrea/tests/bitcoin/mod.rs
+++ b/bin/citrea/tests/bitcoin/mod.rs
@@ -14,6 +14,7 @@ pub mod backup;
 pub mod bitcoin_service;
 pub mod bitcoin_test;
 pub mod bitcoin_verifier;
+#[cfg(feature = "testing")]
 pub mod da_job;
 #[cfg(feature = "testing")]
 pub mod da_queue;
diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs
index dcfb416170..fa6d67453d 100644
--- a/bin/citrea/tests/bitcoin/utils.rs
+++ b/bin/citrea/tests/bitcoin/utils.rs
@@ -33,15 +33,18 @@ use citrea_primitives::{MAX_TX_BODY_SIZE, REVEAL_TX_PREFIX};
 use jsonrpsee::http_client::{HttpClient, HttpClientBuilder};
 use jsonrpsee::RpcModule;
 use reth_tasks::TaskExecutor;
+use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, ReceiptClaim};
 use sov_db::ledger_db::LedgerDB;
 use sov_db::rocks_db_config::RocksdbConfig;
 use sov_ledger_rpc::LedgerRpcClient;
+use sov_modules_api::BatchProofCircuitOutputV3;
 use sov_rollup_interface::da::{
     BatchProofMethodId, BatchProofMethodIdBody, DaTxRequest, SequencerCommitment,
     SECURITY_COUNCIL_SIGNATURE_SIZE, SECURITY_COUNCIL_SIGNATURE_THRESHOLD,
 };
 use sov_rollup_interface::rpc::{JobRpcResponse, VerifiedBatchProofResponse};
 use sov_rollup_interface::services::da::DaService;
+use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff};
 use sov_rollup_interface::Network;
 use tokio::time::sleep;
 use uuid::Uuid;
@@ -764,3 +767,58 @@ pub mod macros {
 
     pub(crate) use assert_panic;
 }
+
+#[allow(clippy::too_many_arguments)]
+pub fn create_serialized_fake_receipt_batch_proof_and_serialized_output(
+    initial_state_root: [u8; 32],
+    last_l2_height: u64,
+    method_id: [u32; 8],
+    state_diff: Option<CumulativeStateDiff>,
+    malformed_journal: bool,
+    last_l1_hash_on_bitcoin_light_client_contract: [u8; 32],
+    sequencer_commitments: Vec<SequencerCommitment>,
+    state_roots_of_seq_comms: Vec<[u8; 32]>,
+    prev_sequencer_commitment_hash: Option<[u8; 32]>,
+) -> (Vec<u8>, Vec<u8>) {
+    let sequencer_commitment_hashes = sequencer_commitments
+        .iter()
+        .map(|c| c.serialize_and_calculate_sha_256())
+        .collect::<Vec<_>>();
+    let previous_commitment_index = if sequencer_commitments[0].index == 1 {
+        None
+    } else {
+        Some(sequencer_commitments[0].index - 1)
+    };
+    let mut state_roots = vec![initial_state_root];
+
+    // For ease of testing, we can use the merkle root as the state root
+    state_roots.extend(state_roots_of_seq_comms);
+
+    let output_v3 = BatchProofCircuitOutputV3 {
+        state_roots,
+        last_l2_height,
+        final_l2_block_hash: [0u8; 32],
+        state_diff: state_diff.unwrap_or_default(),
+        sequencer_commitment_hashes,
+        last_l1_hash_on_bitcoin_light_client_contract,
+        sequencer_commitment_index_range: (
+            sequencer_commitments[0].index,
+            sequencer_commitments[sequencer_commitments.len() - 1].index,
+        ),
+        previous_commitment_index,
+        previous_commitment_hash: prev_sequencer_commitment_hash,
+    };
+    let batch_proof_output = BatchProofCircuitOutput::V3(output_v3);
+    let mut output_serialized = borsh::to_vec(&batch_proof_output).unwrap();
+
+    // Distort the output to make it unparsable
+    if malformed_journal {
+        output_serialized.push(1u8);
+    }
+
+    let claim = MaybePruned::Value(ReceiptClaim::ok(method_id, output_serialized.clone()));
+    let fake_receipt = FakeReceipt::new(claim);
+    // Receipt with verifiable claim
+    let receipt = InnerReceipt::Fake(fake_receipt);
+    (bincode::serialize(&receipt).unwrap(), output_serialized)
+}
diff --git a/crates/batch-prover/Cargo.toml b/crates/batch-prover/Cargo.toml
index cc57e58962..bb728aa32e 100644
--- a/crates/batch-prover/Cargo.toml
+++ b/crates/batch-prover/Cargo.toml
@@ -63,3 +63,6 @@ tempfile = { workspace = true }
 
 [lints]
 workspace = true
+
+[features]
+testing = []
diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs
index c4ba4544a2..607293c407 100644
--- a/crates/batch-prover/src/prover.rs
+++ b/crates/batch-prover/src/prover.rs
@@ -1274,7 +1274,7 @@ fn get_prev_hash_proof(
 ///
 /// # Returns
 /// A `BatchProofCircuitOutput` that contains the extracted output from the proof.
-fn extract_proof_output(
+pub(crate) fn extract_proof_output(
     job_id: &Uuid,
     proof: &Proof,
     code_commitments_by_spec: &HashMap,
diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs
index 0809e66525..060bbb7191 100644
--- a/crates/batch-prover/src/rpc.rs
+++ b/crates/batch-prover/src/rpc.rs
@@ -8,6 +8,8 @@ use std::collections::HashMap;
 use std::fmt::Debug;
 use std::path::Path;
+#[cfg(feature = "testing")]
+use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::{SystemTime, UNIX_EPOCH};
 use std::{env, fs};
@@ -267,6 +269,18 @@
     /// A new `Uuid` representing the retried proving job.
     #[method(name = "retryProvingJob")]
     async fn retry_proving_job(&self, job_id: Uuid) -> RpcResult<Uuid>;
+
+    /// Submit a proof from a file path. For testing/debugging purposes only.
+    ///
+    /// # Arguments
+    /// * `proof_path` - Path to the serialized proof file to submit
+    ///
+    /// # Returns
+    /// A `BatchProofResponse` containing the L1 transaction ID and proof.
+ #[cfg(feature = "testing")] + #[method(name = "submitProofFromFile")] + async fn submit_proof_from_file(&self, proof_path: PathBuf, output: Vec) + -> RpcResult; } /// Server implementation of the Batch Prover RPC interface @@ -661,6 +675,48 @@ where info!("Retried proving job {}, new job id: {}", job_id, new_id); Ok(new_id) } + + #[cfg(feature = "testing")] + async fn submit_proof_from_file( + &self, + proof_path: PathBuf, + output: Vec, + ) -> RpcResult { + let ledger_db = &self.context.ledger_db; + let proving_job_id = Uuid::now_v7(); + info!("Submitting proof from file {proof_path:?} with id {proving_job_id}"); + + let proof = fs::read(&proof_path) + .map_err(|e| internal_rpc_error(format!("Failed to read proof file: {e}")))?; + + let output: BatchProofCircuitOutput = borsh::from_slice(&output).unwrap(); + + let commitment_indices = (output.sequencer_commitment_index_range().0 + ..output.sequencer_commitment_index_range().1) + .collect(); + + ledger_db + .insert_new_proving_job(proving_job_id, &commitment_indices) + .expect("Should insert new proving job"); + ledger_db + .put_proof_by_job_id(proving_job_id, proof.clone(), output.into()) + .expect("Should put proof to db"); + + let (da_job_id, _) = self + .context + .da_service + .send_transaction(DaTxRequest::ZKProof(proof.clone())) + .await + .map_err(internal_rpc_error)?; + + ledger_db + .set_proving_job_da_job_id(proving_job_id, da_job_id) + .expect("Failed to save da job by id"); + + info!("Submitted proof from file, da job id: {da_job_id}"); + + Ok(da_job_id) + } } /// Creates an RPC module with fullnode methods From a12e54b21c6c1a086f88a6b3b4031dda6b9adbb8 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 22 Oct 2025 20:01:19 +0100 Subject: [PATCH 52/81] Fix build --- crates/batch-prover/src/rpc.rs | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index 060bbb7191..2e39df474e 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -7,9 +7,7 @@ use std::collections::HashMap; use std::fmt::Debug; -use std::path::Path; -#[cfg(feature = "testing")] -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use std::{env, fs}; @@ -270,14 +268,14 @@ pub trait BatchProverRpc { #[method(name = "retryProvingJob")] async fn retry_proving_job(&self, job_id: Uuid) -> RpcResult; - /// Submit a proof from a file path. For testing/debugging purposes only. + /// Submit a proof from a file path. Only available with `testing` feature. /// /// # Arguments /// * `proof_path` - Path to the serialized proof file to submit + /// * `output` - Serialized `BatchProofCircuitOutput` /// /// # Returns - /// A `BatchProofResponse` containing the L1 transaction ID and proof. 
- #[cfg(feature = "testing")] + /// The bitcoin-da job id #[method(name = "submitProofFromFile")] async fn submit_proof_from_file(&self, proof_path: PathBuf, output: Vec) -> RpcResult; @@ -676,6 +674,15 @@ where Ok(new_id) } + #[cfg(not(feature = "testing"))] + async fn submit_proof_from_file( + &self, + _proof_path: PathBuf, + _output: Vec, + ) -> RpcResult { + Err(internal_rpc_error("Unsupported test method")) + } + #[cfg(feature = "testing")] async fn submit_proof_from_file( &self, @@ -719,13 +726,13 @@ where } } -/// Creates an RPC module with fullnode methods +/// Creates an RPC module with batch-prover methods /// /// # Arguments /// * `rpc_context` - Context containing shared data for RPC methods /// /// # Type Parameters -/// * `DB` - Database type implementing NodeLedgerOps +/// * `DB` - Database type implementing BatchProverLedgerOps /// * `Da` - Data availability service type implementing DaService /// * `Vm` - Virtual machine type implementing Zkvm pub fn create_rpc_module( From 06efea69949d477c308b33097ecb20bf800d65b6 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 22 Oct 2025 22:11:52 +0100 Subject: [PATCH 53/81] Handle StoredProof variant for mockda --- Cargo.lock | 1 + bin/citrea/src/rollup/mock.rs | 5 ++-- .../sovereign-sdk/adapters/mock-da/Cargo.toml | 3 ++- .../adapters/mock-da/src/service.rs | 27 ++++++++++++++++--- 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 789c8b7160..85af14d725 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12116,6 +12116,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", + "sov-db", "sov-rollup-interface", "tempfile", "tokio", diff --git a/bin/citrea/src/rollup/mock.rs b/bin/citrea/src/rollup/mock.rs index 2596b0a57f..4908a1b348 100644 --- a/bin/citrea/src/rollup/mock.rs +++ b/bin/citrea/src/rollup/mock.rs @@ -71,11 +71,12 @@ impl RollupBlueprint for MockDemoRollup { _require_wallet_check: bool, _task_manager: TaskExecutor, _network: Network, - _ledger_db: LedgerDB, + ledger_db: LedgerDB, ) -> Result, anyhow::Error> { - Ok(Arc::new(MockDaService::new( + Ok(Arc::new(MockDaService::new_with_ledger_db( rollup_config.da.sender_address.clone(), &rollup_config.da.db_path, + ledger_db, ))) } diff --git a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml index 2b34b16ea7..a445cb92a9 100644 --- a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml +++ b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml @@ -24,7 +24,7 @@ serde_json = { workspace = true, optional = true } tracing = { workspace = true, optional = true, features = ["attributes"]} uuid = { workspace = true, optional = true } - +sov-db = { path = "../../full-node/db/sov-db", optional = true } sov-rollup-interface = { path = "../../rollup-interface" } [dev-dependencies] @@ -39,4 +39,5 @@ native = [ "dep:tracing", "dep:uuid", "sov-rollup-interface/native", + "sov-db" ] diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index 97a61c9f10..dcf48d247b 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -5,6 +5,7 @@ use std::time::Duration; use async_trait::async_trait; use borsh::BorshDeserialize; use sha2::Digest; +use sov_db::ledger_db::{DaLedgerOps, LedgerDB}; use sov_rollup_interface::da::{ BlobReaderTrait, BlockHeaderTrait, DaSpec, DaTxRequest, DataOnDa, SequencerCommitment, Time, }; @@ -76,6 +77,7 @@ pub 
struct MockDaService {
     finalized_header_sender: broadcast::Sender<MockBlockHeader>,
     wait_attempts: usize,
     planned_fork: Arc<Mutex<Option<PlannedFork>>>,
+    ledger_db: Option<LedgerDB>,
 }
 
 impl MockDaService {
@@ -84,6 +86,18 @@
         Self::with_finality(sequencer_da_address, 0, db_path)
     }
 
+    /// Creates a new [`MockDaService`] with instant finality and access to LedgerDB for stored-proof-related functionality.
+    pub fn new_with_ledger_db(
+        sequencer_da_address: MockAddress,
+        db_path: &Path,
+        ledger_db: LedgerDB,
+    ) -> Self {
+        let mut service = Self::with_finality(sequencer_da_address, 0, db_path);
+
+        service.ledger_db = Some(ledger_db);
+        service
+    }
+
     /// Create a new [`MockDaService`] with given finality.
     #[tracing::instrument(name = "MockDA")]
     pub fn with_finality(
@@ -106,6 +120,7 @@
             finalized_header_sender: tx,
             wait_attempts: 100_0000,
             planned_fork: Arc::new(Mutex::new(None)),
+            ledger_db: None,
         }
     }
 
@@ -443,8 +458,14 @@ impl DaService for MockDaService {
                 let req = DataOnDa::Complete(proof);
                 borsh::to_vec(&req).unwrap()
             }
-            DaTxRequest::StoredProof(_) => {
-                unimplemented!()
+            DaTxRequest::StoredProof(proof_id) => {
+                let proof = self
+                    .ledger_db
+                    .as_ref()
+                    .unwrap()
+                    .get_proof_by_proof_id(proof_id)?;
+                let req = DataOnDa::Complete(proof);
+                borsh::to_vec(&req).unwrap()
             }
             DaTxRequest::SequencerCommitment(seq_comm) => {
                 tracing::debug!("Adding a sequencer commitment");
@@ -462,7 +483,7 @@
         let (tx, rx) = oneshot::channel();
         let _ = tx.send(Ok(MockHash([0; 32])));
 
-        Ok((Uuid::default(), rx))
+        Ok((Uuid::nil(), rx))
     }
 
     async fn get_fee_rate(&self) -> Result {

From 4299a90babf0d6844678d138adb51afbe5493910 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Thu, 23 Oct 2025 17:38:53 +0100
Subject: [PATCH 54/81] Add RPC to get da job id by job id

---
 bin/citrea/tests/bitcoin/batch_prover_test.rs | 16 ++++++++++++++++
 crates/batch-prover/src/prover.rs             |  6 +++---
 crates/batch-prover/src/rpc.rs                | 19 ++++++++++++++++++-
 .../full-node/db/sov-db/src/ledger_db/mod.rs  |  4 ++--
 .../db/sov-db/src/ledger_db/traits.rs         |  4 ++--
 5 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/bin/citrea/tests/bitcoin/batch_prover_test.rs b/bin/citrea/tests/bitcoin/batch_prover_test.rs
index 263b1146f2..67b4baec6c 100644
--- a/bin/citrea/tests/bitcoin/batch_prover_test.rs
+++ b/bin/citrea/tests/bitcoin/batch_prover_test.rs
@@ -1555,6 +1555,12 @@ impl TestCase for RetryProvingTest {
             .unwrap();
         assert_eq!(proving_job.commitments.len(), 4);
 
+        let da_job_id = batch_prover
+            .client
+            .http_client()
+            .get_da_job_id_by_job_id(proving_job.id)
+            .await?;
+
         // retry proving the same job
         let new_job_id = batch_prover
             .client
@@ -1565,6 +1571,16 @@
 
         wait_for_prover_job(batch_prover, new_job_id, None).await?;
 
+        let retried_da_job_id = batch_prover
+            .client
+            .http_client()
+            .get_da_job_id_by_job_id(new_job_id)
+            .await?;
+        assert_ne!(
+            da_job_id, retried_da_job_id,
+            "new da job id should be different"
+        );
+
         // check the commitments of the new proving job
         let new_proving_job = batch_prover
             .client
diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs
index 607293c407..c95c34484c 100644
--- a/crates/batch-prover/src/prover.rs
+++ b/crates/batch-prover/src/prover.rs
@@ -747,7 +747,7 @@ where
             info!("Job {proving_job_id} proof submitted to DA.
Da job id {da_job_id}"); ledger_db - .set_proving_job_da_job_id(proving_job_id, da_job_id) + .set_da_job_id_by_prover_job_id(proving_job_id, da_job_id) .expect("Failed to save da job by id"); // Todo handle da job sending failure @@ -843,7 +843,7 @@ where // Recovery on-going in progress proof on DA let rx = if let Some(da_job_id) = ledger_db - .get_proving_job_da_job_id(proving_job_id) + .get_da_job_id_by_prover_job_id(proving_job_id) .expect("DB call shouldn't fail") { info!( @@ -862,7 +862,7 @@ where .expect("Failed to submit proof"); ledger_db - .set_proving_job_da_job_id(proving_job_id, da_job_id) + .set_da_job_id_by_prover_job_id(proving_job_id, da_job_id) .expect("Failed to set da job_id"); info!("Recovered Job {} proof sent to DA", proving_job_id); rx diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index 2e39df474e..e25549c4e9 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -223,6 +223,16 @@ pub trait BatchProverRpc { #[method(name = "getProvingJob")] async fn get_proving_job(&self, job_id: Uuid) -> RpcResult>; + /// Get da job id by job id. + /// + /// # Arguments + /// * `job_id` - The unique identifier of the proving job to retrieve. + /// + /// # Returns + /// An optional `Uuid` for the associated da job. + #[method(name = "getDaJobIdByJobId")] + async fn get_da_job_id_by_job_id(&self, job_id: Uuid) -> RpcResult>; + /// Gets last `count` number of job ids. Returns ids in descending order, so latest job is the first index. /// /// # Arguments @@ -674,6 +684,13 @@ where Ok(new_id) } + async fn get_da_job_id_by_job_id(&self, job_id: Uuid) -> RpcResult> { + self.context + .ledger_db + .get_da_job_id_by_prover_job_id(job_id) + .map_err(internal_rpc_error) + } + #[cfg(not(feature = "testing"))] async fn submit_proof_from_file( &self, @@ -717,7 +734,7 @@ where .map_err(internal_rpc_error)?; ledger_db - .set_proving_job_da_job_id(proving_job_id, da_job_id) + .set_da_job_id_by_prover_job_id(proving_job_id, da_job_id) .expect("Failed to save da job by id"); info!("Submitted proof from file, da job id: {da_job_id}"); diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 510987a542..36d2fab8f6 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -695,7 +695,7 @@ impl BatchProverLedgerOps for LedgerDB { } } - fn set_proving_job_da_job_id( + fn set_da_job_id_by_prover_job_id( &self, proving_job_id: Uuid, da_job_id: Uuid, @@ -707,7 +707,7 @@ impl BatchProverLedgerOps for LedgerDB { self.db.write_schemas(schema_batch) } - fn get_proving_job_da_job_id(&self, proving_job_id: Uuid) -> anyhow::Result> { + fn get_da_job_id_by_prover_job_id(&self, proving_job_id: Uuid) -> anyhow::Result> { self.db.get::(&proving_job_id) } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index f55a25c757..bd35c57d31 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -274,10 +274,10 @@ pub trait BatchProverLedgerOps: SharedLedgerOps + Send + Sync { fn job_status(&self, id: Uuid) -> JobStatus; /// Set a da job_id by prover job_id - fn set_proving_job_da_job_id(&self, proving_job_id: Uuid, da_job_id: Uuid) -> Result<()>; + fn set_da_job_id_by_prover_job_id(&self, proving_job_id: 
Uuid, da_job_id: Uuid) -> Result<()>; /// Get da job_id by prover job_id - fn get_proving_job_da_job_id(&self, proving_job_id: Uuid) -> Result>; + fn get_da_job_id_by_prover_job_id(&self, proving_job_id: Uuid) -> Result>; } /// Light client prover ledger operations From 2bf550ba9854d9014a0013dc469b8670b80a3891 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 24 Oct 2025 15:00:18 +0100 Subject: [PATCH 55/81] Remove Job wrapper and store DaTxRequest as value instead --- crates/bitcoin-da/src/job/metrics.rs | 7 +-- crates/bitcoin-da/src/job/service.rs | 45 ++++++++----------- crates/bitcoin-da/src/service.rs | 6 +-- .../full-node/db/sov-db/src/ledger_db/mod.rs | 24 +++++----- .../db/sov-db/src/ledger_db/traits.rs | 16 ++++--- .../full-node/db/sov-db/src/schema/tables.rs | 14 +++--- .../db/sov-db/src/schema/types/da_jobs.rs | 22 --------- .../src/rollback/node/batch_prover.rs | 8 ++-- .../src/rollback/node/sequencer.rs | 6 +-- 9 files changed, 61 insertions(+), 87 deletions(-) diff --git a/crates/bitcoin-da/src/job/metrics.rs b/crates/bitcoin-da/src/job/metrics.rs index 2f3989c2fb..70054dd920 100644 --- a/crates/bitcoin-da/src/job/metrics.rs +++ b/crates/bitcoin-da/src/job/metrics.rs @@ -53,10 +53,6 @@ pub struct DaJobMetrics { /// Number of chunks sent per job #[metric(describe = "Number of commit/reveal pairs sent per job")] pub job_chunks_sent: Histogram, - - /// Size of job data in bytes - #[metric(describe = "Size of job data in bytes")] - pub job_data_size: Histogram, } impl DaJobMetrics { @@ -107,10 +103,9 @@ impl DaJobMetrics { } /// Record a job submission - pub fn record_job_submitted(&self, data_size: usize) { + pub fn record_job_submitted(&self) { self.jobs_submitted_total.increment(1); self.jobs_pending.increment(1.0); - self.job_data_size.record(data_size as f64); } } diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index f62f67ba3d..7cbf317665 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -7,17 +7,18 @@ use bitcoin::hashes::Hash; use bitcoin::Txid; use lru::LruCache; use sov_db::ledger_db::DaLedgerOps; -use sov_db::schema::types::da_jobs::{DaJobStatus, Job, JobId, JobProgress}; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress}; use sov_rollup_interface::da::{DaTxRequest, DataOnDa}; use tokio::sync::oneshot; use tracing::{info, instrument}; +use uuid::Uuid; use super::Result; use crate::error::BitcoinServiceError; use crate::helpers::builders::body_builders::RawTxData; use crate::helpers::get_timestamp; use crate::job::error::JobServiceError; -use crate::job::metrics::DA_JOB_METRICS as JM; +use crate::job::metrics::DA_JOB_METRICS as METRICS; use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; use crate::service::{split_proof, TxidWrapper}; @@ -49,18 +50,14 @@ impl DaJobService { da_tx_request: DaTxRequest, tx: oneshot::Sender>, ) -> Result { - let job_id = uuid::Uuid::now_v7(); - let created_at = get_timestamp(); + let job_id = Uuid::now_v7(); - // Serialize RawTxData to Vec - let data = borsh::to_vec(&da_tx_request)?; + let progress = JobProgress::new(job_id, get_timestamp()); - let job = Job::new(job_id, data, created_at); - let progress = JobProgress::new(job_id, created_at); - - self.ledger_db.submit_job(&job, &progress)?; + self.ledger_db + .submit_job(job_id, &da_tx_request, &progress)?; - JM.record_job_submitted(job.data.len()); + METRICS.record_job_submitted(); self.job_waiters.lock().unwrap().insert(job_id, tx); @@ 
-68,11 +65,11 @@ impl DaJobService { Ok(job_id) } - /// Get a job by id + /// Get a job data by id #[instrument(level = "trace", skip(self), ret)] - pub(crate) fn get_job(&self, job_id: &JobId) -> Result> { + pub(crate) fn get_job_request(&self, job_id: &JobId) -> Result> { self.ledger_db - .get_job(job_id) + .get_job_request(job_id) .map_err(JobServiceError::DatabaseError) } @@ -101,15 +98,11 @@ impl DaJobService { /// /// * `Result` - The raw transaction data or an error #[instrument(level = "trace", skip(self), ret)] - pub(crate) fn get_job_data(&self, job: &Job) -> Result { - if let Some(data) = self.raw_tx_data_cache.lock().unwrap().get(&job.id) { + pub(crate) fn get_job_data(&self, job_id: Uuid, job_data: DaTxRequest) -> Result { + if let Some(data) = self.raw_tx_data_cache.lock().unwrap().get(&job_id) { return Ok(data.to_owned()); }; - // Deserialize RawTxData from job - let job_data: DaTxRequest = - borsh::from_slice(&job.data).map_err(JobServiceError::SerializationError)?; - let raw_tx_data = match job_data { DaTxRequest::ZKProof(zkproof) => split_proof(zkproof), DaTxRequest::StoredProof(proof_id) => { @@ -133,7 +126,7 @@ impl DaJobService { self.raw_tx_data_cache .lock() .unwrap() - .push(job.id, raw_tx_data.clone()); + .push(job_id, raw_tx_data.clone()); Ok(raw_tx_data) } @@ -176,7 +169,7 @@ impl DaJobService { self.ledger_db .upsert_progress(&db_progress, previous_status.as_u8())?; - JM.record_status_update(&previous_status, progress); + METRICS.record_status_update(&previous_status, progress); self.notify_new_status(job_id, progress); @@ -286,15 +279,13 @@ impl DaJobRpcProvider for DaJobService { match progress.status { DaJobStatus::Failed { .. } | DaJobStatus::Cancelled => { // Get original job and deserialize data - let original_job = self - .get_job(&job_id)? + let da_tx_request = self + .get_job_request(&job_id)? .ok_or(JobServiceError::JobNotFound(job_id))?; - let raw_data: DaTxRequest = borsh::from_slice(&original_job.data)?; - let (tx, _rx) = oneshot::channel(); // Create new job with same data - let new_job_id = self.submit_job(raw_data, tx)?; + let new_job_id = self.submit_job(da_tx_request, tx)?; tracing::info!("Job {job_id} retried as new job {new_job_id}"); Ok(new_job_id) diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 5f16fb4682..528e45fd5a 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -339,14 +339,14 @@ impl BitcoinService { for job_id in active_job_ids { info!("Processing job {}", job_id); - let job = job_service - .get_job(&job_id)? + let job_request = job_service + .get_job_request(&job_id)? .ok_or(JobServiceError::JobNotFound(job_id))?; let progress = &mut job_service .get_progress(&job_id)? 
.ok_or(JobServiceError::JobNotFound(job_id))?; - let job_data = job_service.get_job_data(&job)?; + let job_data = job_service.get_job_data(job_id, job_request)?; let sent_txids = job_service.get_pending_chunks()?; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 36d2fab8f6..db195ec002 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use anyhow::Context; use rocksdb::{ReadOptions, WriteBatch}; use sov_rollup_interface::block::L2Block; -use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; use sov_rollup_interface::fork::{Fork, ForkMigration}; use sov_rollup_interface::stf::StateDiff; use sov_rollup_interface::zk::{Proof, StorageRootHash}; @@ -18,9 +18,9 @@ use crate::rocks_db_config::RocksdbConfig; use crate::schema::tables::TestTableNew; use crate::schema::tables::{ CommitmentIndicesByJobId, CommitmentIndicesByL1, CommitmentMerkleRoots, CommitmentsByNumber, - DaJobById, DaJobIdByProvingJobId, DaJobProgressById, DaJobStatusIndex, ExecutedMigrations, - JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, L2GenesisStateRoot, L2RangeByL1Height, - L2StatusHeights, LastPrunedBlock, LightClientProofBySlotNumber, MempoolTxs, + DaJobIdByProvingJobId, DaJobProgressById, DaJobStatusIndex, DaTxRequestByJobId, + ExecutedMigrations, JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, L2GenesisStateRoot, + L2RangeByL1Height, L2StatusHeights, LastPrunedBlock, LightClientProofBySlotNumber, MempoolTxs, PendingBonsaiSessionByJobId, PendingBoundlessSessionByJobId, PendingL1SubmissionJobs, PendingProofs, PendingSequencerCommitments, ProofByJobId, ProverLastScannedSlot, ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, @@ -30,7 +30,7 @@ use crate::schema::tables::{ use crate::schema::types::batch_proof::{ StoredBatchProof, StoredBatchProofOutput, StoredVerifiedProof, }; -use crate::schema::types::da_jobs::{Job, JobProgress}; +use crate::schema::types::da_jobs::JobProgress; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::{StoredL2Block, StoredTransaction}; use crate::schema::types::light_client_proof::{ @@ -1017,12 +1017,16 @@ impl ForkMigration for LedgerDB { } impl DaLedgerOps for LedgerDB { - fn submit_job(&self, job: &Job, progress: &JobProgress) -> anyhow::Result<()> { + fn submit_job( + &self, + job_id: Uuid, + da_tx_request: &DaTxRequest, + progress: &JobProgress, + ) -> anyhow::Result<()> { let mut batch = SchemaBatch::new(); - let job_id = job.id; let status = progress.status.as_u8(); - batch.put::(&job_id, job)?; + batch.put::(&job_id, da_tx_request)?; batch.put::(&job_id, progress)?; batch.put::(&(status, job_id), &())?; @@ -1030,8 +1034,8 @@ impl DaLedgerOps for LedgerDB { Ok(()) } - fn get_job(&self, job_id: &Uuid) -> anyhow::Result> { - self.db.get::(job_id) + fn get_job_request(&self, job_id: &Uuid) -> anyhow::Result> { + self.db.get::(job_id) } fn upsert_progress(&self, progress: &JobProgress, previous_status: u8) -> anyhow::Result<()> { diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index bd35c57d31..6360abcd4f 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ 
-4,7 +4,7 @@ use std::sync::Arc; use anyhow::Result; use sov_rollup_interface::block::L2Block; -use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; use sov_rollup_interface::stf::StateDiff; use sov_rollup_interface::zk::{Proof, StorageRootHash}; use sov_schema_db::SchemaIterator; @@ -12,7 +12,7 @@ use uuid::Uuid; use crate::schema::tables::{PendingProofs, PendingSequencerCommitments}; use crate::schema::types::batch_proof::{StoredBatchProof, StoredBatchProofOutput}; -use crate::schema::types::da_jobs::{Job, JobProgress}; +use crate::schema::types::da_jobs::JobProgress; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::StoredL2Block; use crate::schema::types::light_client_proof::{ @@ -352,9 +352,15 @@ pub trait SequencerLedgerOps: SharedLedgerOps { /// Bitcoin da ledger operations pub trait DaLedgerOps { /// Store a job to db - fn submit_job(&self, job: &Job, progress: &JobProgress) -> anyhow::Result<()>; - /// Get a DA job by id - fn get_job(&self, job_id: &Uuid) -> Result>; + fn submit_job( + &self, + job_id: Uuid, + job: &DaTxRequest, + progress: &JobProgress, + ) -> anyhow::Result<()>; + + /// Get a DA job request by id + fn get_job_request(&self, job_id: &Uuid) -> Result>; /// Update a DA job progress by id fn upsert_progress(&self, progress: &JobProgress, previous_status: u8) -> Result<()>; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index 52ca37dede..99b6d76049 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -12,7 +12,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use jmt::storage::{NibblePath, Node, NodeKey, StaleNodeIndex}; use jmt::Version; -use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; use sov_rollup_interface::stf::StateDiff; use sov_rollup_interface::zk::Proof; use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec}; @@ -20,7 +20,7 @@ use sov_schema_db::{CodecError, SeekKeyEncoder}; use uuid::Uuid; use super::types::batch_proof::{StoredBatchProof, StoredVerifiedProof}; -use super::types::da_jobs::{Job, JobProgress}; +use super::types::da_jobs::JobProgress; use super::types::l2_block::StoredL2Block; use super::types::light_client_proof::StoredLightClientProof; use super::types::{ @@ -44,7 +44,7 @@ pub const STATE_TABLES: &[&str] = &[ /// Note: Please keep the list sorted alphabetically pub const SEQUENCER_LEDGER_TABLES: &[&str] = &[ CommitmentsByNumber::table_name(), - DaJobById::table_name(), + DaTxRequestByJobId::table_name(), DaJobProgressById::table_name(), DaJobStatusIndex::table_name(), ExecutedMigrations::table_name(), @@ -97,7 +97,7 @@ pub const FULL_NODE_LEDGER_TABLES: &[&str] = &[ pub const BATCH_PROVER_LEDGER_TABLES: &[&str] = &[ CommitmentIndicesByJobId::table_name(), CommitmentIndicesByL1::table_name(), - DaJobById::table_name(), + DaTxRequestByJobId::table_name(), DaJobIdByProvingJobId::table_name(), DaJobProgressById::table_name(), DaJobStatusIndex::table_name(), @@ -152,7 +152,7 @@ pub const LEDGER_TABLES: &[&str] = &[ CommitmentIndicesByL1::table_name(), CommitmentMerkleRoots::table_name(), CommitmentsByNumber::table_name(), - DaJobById::table_name(), + DaTxRequestByJobId::table_name(), DaJobProgressById::table_name(), 
DaJobStatusIndex::table_name(), ExecutedMigrations::table_name(), @@ -513,8 +513,8 @@ define_table_with_seek_key_codec!( ); define_table_with_seek_key_codec!( - /// Da job by uuid - (DaJobById) Uuid => Job + /// DaTxRequest by uuid + (DaTxRequestByJobId) Uuid => DaTxRequest ); define_table_with_seek_key_codec!( diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index 86ed4d91c5..ded615639c 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -90,25 +90,3 @@ impl JobProgress { } } } - -/// DA Job representing a transaction to be sent to the DA layer -#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] -pub struct Job { - /// Job id as uuidv7 - pub id: JobId, - /// Raw job data (serialized RawTxData) - pub data: Vec, - /// Time of job creation - pub created_at: u64, -} - -impl Job { - /// Create a new job with the given serialized data - pub fn new(id: JobId, data: Vec, created_at: u64) -> Self { - Self { - id, - data, - created_at, - } - } -} diff --git a/crates/storage-ops/src/rollback/node/batch_prover.rs b/crates/storage-ops/src/rollback/node/batch_prover.rs index 619683f5ed..74006a5910 100644 --- a/crates/storage-ops/src/rollback/node/batch_prover.rs +++ b/crates/storage-ops/src/rollback/node/batch_prover.rs @@ -2,8 +2,8 @@ use std::collections::HashMap; use std::sync::Arc; use sov_db::schema::tables::{ - CommitmentIndicesByJobId, CommitmentIndicesByL1, DaJobById, DaJobIdByProvingJobId, - DaJobProgressById, DaJobStatusIndex, JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, + CommitmentIndicesByJobId, CommitmentIndicesByL1, DaJobIdByProvingJobId, DaJobProgressById, + DaJobStatusIndex, DaTxRequestByJobId, JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, PendingL1SubmissionJobs, ProofByJobId, ProverLastScannedSlot, ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, ShortHeaderProofBySlotHash, SlotByHash, }; @@ -208,8 +208,8 @@ impl BatchProverLedgerRollback { let status_u8 = progress.status.as_u8(); // Delete from all DA job tables - batch.delete::(&job_id)?; - increment_table_counter!("DaJobById", rollback_result); + batch.delete::(&job_id)?; + increment_table_counter!("DaTxRequestByJobId", rollback_result); batch.delete::(&job_id)?; increment_table_counter!("DaJobProgressById", rollback_result); diff --git a/crates/storage-ops/src/rollback/node/sequencer.rs b/crates/storage-ops/src/rollback/node/sequencer.rs index d7ae777bf4..8b6e570423 100644 --- a/crates/storage-ops/src/rollback/node/sequencer.rs +++ b/crates/storage-ops/src/rollback/node/sequencer.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use sov_db::schema::tables::{ - CommitmentsByNumber, DaJobById, DaJobProgressById, DaJobStatusIndex, L2BlockByHash, + CommitmentsByNumber, DaJobProgressById, DaJobStatusIndex, DaTxRequestByJobId, L2BlockByHash, L2BlockByNumber, L2RangeByL1Height, SequencerCommitmentByIndex, StateDiffByBlockNumber, }; use sov_db::schema::types::{L2BlockNumber, SlotNumber}; @@ -130,8 +130,8 @@ impl SequencerLedgerRollback { let status_u8 = progress.status.as_u8(); // Delete from all three tables - batch.delete::(&job_id)?; - increment_table_counter!("DaJobById", rollback_result); + batch.delete::(&job_id)?; + increment_table_counter!("DaTxRequestByJobId", rollback_result); batch.delete::(&job_id)?; increment_table_counter!("DaJobProgressById", rollback_result); 
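With the `Job` wrapper removed, the value stored under `DaTxRequestByJobId` is the borsh-encoded `DaTxRequest` itself, and the UUIDv7 key already carries the creation time, so nothing from the old `{ id, data, created_at }` struct is actually lost. A minimal sketch of that round trip, using a simplified stand-in enum for `DaTxRequest` (the real one carries proofs and commitments) and assuming the `borsh` crate with its `derive` feature plus `uuid` with the `v7` feature:

    use borsh::{BorshDeserialize, BorshSerialize};
    use uuid::Uuid;

    // Simplified stand-in for DaTxRequest; only the round-trip shape matters.
    #[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
    enum TxRequest {
        StoredProof([u8; 16]),
    }

    fn main() -> std::io::Result<()> {
        // UUIDv7 keys are time-ordered, and the creation timestamp is
        // recoverable from the id, which is how JobInfoResponse derives
        // created_at later in this series.
        let job_id = Uuid::now_v7();
        let created_at = job_id.get_timestamp().map_or(0, |ts| ts.to_unix().0);

        // The table value is the request itself, with no wrapper struct.
        let value = borsh::to_vec(&TxRequest::StoredProof(*job_id.as_bytes()))?;
        let decoded: TxRequest = borsh::from_slice(&value)?;
        assert_eq!(decoded, TxRequest::StoredProof(*job_id.as_bytes()));

        println!("job {job_id}, created at {created_at}, {} value bytes", value.len());
        Ok(())
    }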
From 30f64445c5e7a6f19f5ef372ff7353714c4bb45a Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 24 Oct 2025 17:58:18 +0100 Subject: [PATCH 56/81] Lint --- crates/bitcoin-da/src/job/rpc.rs | 2 +- crates/bitcoin-da/src/job/service.rs | 4 ++-- crates/bitcoin-da/src/service.rs | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 69d192c387..8a1b34c0cd 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -92,7 +92,7 @@ pub enum JobStatusFilter { Cancelled, /// Only failed jobs Failed, - /// All active jobs (Pending + InProgress) + /// All active jobs (`Pending` + `InProgress`) Active, /// All terminal jobs (Completed + Cancelled + Failed) Terminal, diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 7cbf317665..6b27612d4d 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -33,7 +33,7 @@ pub struct DaJobService { } impl DaJobService { - /// Creates a new DaJobService with ledger_db + /// Creates a new `DaJobService` with `ledger_db` pub fn new(ledger_db: DB, cache_size: Option) -> Self { let cache_size = cache_size.unwrap_or_else(|| NonZeroUsize::new(10).unwrap()); @@ -101,7 +101,7 @@ impl DaJobService { pub(crate) fn get_job_data(&self, job_id: Uuid, job_data: DaTxRequest) -> Result { if let Some(data) = self.raw_tx_data_cache.lock().unwrap().get(&job_id) { return Ok(data.to_owned()); - }; + } let raw_tx_data = match job_data { DaTxRequest::ZKProof(zkproof) => split_proof(zkproof), diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 528e45fd5a..226c765aa4 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -337,7 +337,7 @@ impl BitcoinService { // Get all pending/in-progress jobs let active_job_ids = job_service.get_all_active_job_ids()?; for job_id in active_job_ids { - info!("Processing job {}", job_id); + info!("Processing job {job_id}"); let job_request = job_service .get_job_request(&job_id)? 
@@ -357,12 +357,12 @@ impl BitcoinService { Ok(completed) => { if completed { job_service.update_job_status(progress, DaJobStatus::Completed)?; - info!("Job {} completed successfully", job_id); + info!("Job {job_id} completed successfully"); previous_job_in_progress = false; } else { job_service.update_job_status(progress, DaJobStatus::InProgress)?; - info!("Job {} partially sent", job_id); + info!("Job {job_id} partially sent"); previous_job_in_progress = true; } @@ -376,7 +376,7 @@ impl BitcoinService { continue; } Err(e) => { - error!("Error processing job {}: {:?}", job_id, e); + error!("Error processing job {job_id}: {e:?}"); job_service.update_job_status( progress, DaJobStatus::Failed { From 46fe7fda942eb8250a728d2f9f46281a46b5bf5d Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 28 Oct 2025 21:33:42 +0000 Subject: [PATCH 57/81] Recover transient error and test --- bin/citrea/tests/bitcoin/da_job.rs | 131 +++++++++++++++++- bin/citrea/tests/bitcoin/da_queue.rs | 1 + crates/bitcoin-da/src/job/rpc.rs | 9 +- crates/bitcoin-da/src/job/service.rs | 14 +- crates/bitcoin-da/src/service.rs | 14 +- .../full-node/db/sov-db/src/ledger_db/mod.rs | 15 +- .../db/sov-db/src/ledger_db/traits.rs | 8 +- .../db/sov-db/src/schema/types/da_jobs.rs | 3 + 8 files changed, 170 insertions(+), 25 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index 282ab7898a..8352417ea8 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -1,4 +1,5 @@ use std::io::Write; +use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -386,6 +387,100 @@ impl JobServiceTest { Ok(()) } + + #[allow(clippy::too_many_arguments)] + async fn test_job_error_recovery( + &mut self, + da: &mut BitcoinNode, + tx_backup_dir: PathBuf, + da_service: &Arc, + da_service_client: &HttpClient, + genesis_state_root: [u8; 32], + batch_proof_method_id: [u32; 8], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let l1_hash = da.get_block_hash(finalized_height).await?; + let state_diff_400kb = create_random_state_diff(400); + let proof = create_serialized_fake_receipt_batch_proof_with_state_roots( + genesis_state_root, + 20, + batch_proof_method_id, + Some(state_diff_400kb), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + let (job_id, _) = da_service + .send_transaction(DaTxRequest::ZKProof(proof)) + .await?; + + da.wait_mempool_len(18, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let job_before: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(job_before.job_id, job_id); + assert_eq!(job_before.status, DaJobStatus::InProgress); + assert_eq!(job_before.sent_count, 9); + + // Make `tx_backup_dir` read-only to trigger a failure. 
+ // Should make the next job processing fail with `There are no UTXOs` + let metadata = tokio::fs::metadata(&tx_backup_dir).await?; + let mut permissions = metadata.permissions(); + + // Keep original perms for resetting + let original_perms = permissions.clone(); + + permissions.set_readonly(true); + tokio::fs::set_permissions(&tx_backup_dir, permissions.clone()).await?; + + // Mine chunks + da.generate(1).await?; + + // Wait for job processing + std::thread::sleep(std::time::Duration::from_millis(1000)); + + let in_progress_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::InProgress), None, None) + .await?; + assert_eq!(in_progress_jobs.len(), 1); + assert_eq!(in_progress_jobs[0].job_id, job_id); + + let failed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert!(matches!(failed_job.status, DaJobStatus::InProgress)); + assert_eq!(failed_job.created_at, job_before.created_at); + assert_eq!( + failed_job.error, + Some( + "Failed to backup transactions to file: Permission denied (os error 13)" + .to_string() + ) + ); + + // Reset permissions + tokio::fs::set_permissions(&tx_backup_dir, original_perms).await?; + + // Trigger job processing + da.generate(1).await?; + + da.wait_mempool_len(6, None).await?; + + let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(completed_job.status, DaJobStatus::Completed); + assert_eq!(completed_job.created_at, job_before.created_at); + assert_eq!(completed_job.sent_count, 12); + assert_eq!(completed_job.error, None); + + let active_jobs_final = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_final.len(), 0); + Ok(()) + } } #[async_trait] @@ -423,6 +518,13 @@ impl TestCase for JobServiceTest { } } + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + proof_sampling_number: 999_999_999, + ..Default::default() + } + } + async fn cleanup(self) -> Result<()> { self.task_manager .unwrap() @@ -437,13 +539,13 @@ impl TestCase for JobServiceTest { let full_node = f.full_node.as_mut().unwrap(); let light_client_prover = f.light_client_prover.as_mut().unwrap(); + let test_dir = Self::test_config().dir; + let tx_backup_dir = test_dir.join("tx_backup_dir"); + // Common setup - let (da_service, da_service_client) = spawn_bitcoin_da_prover_service_with_rpc_server( - &task_executor, - &da.config, - Self::test_config().dir, - ) - .await; + let (da_service, da_service_client) = + spawn_bitcoin_da_prover_service_with_rpc_server(&task_executor, &da.config, test_dir) + .await; let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); @@ -546,6 +648,23 @@ impl TestCase for JobServiceTest { commitment_state_root, ) .await?; + + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_error_recovery( + da, + tx_backup_dir, + &da_service, + &da_service_client, + genesis_state_root, + batch_proof_method_id, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + // Clean mempool between each step da.generate(1).await?; diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 552c92c927..5d70741863 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -737,6 +737,7 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest { commitment_1_state_root, ) .await?; + Ok(()) } } diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index 
8a1b34c0cd..a17efa6dd3 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -158,19 +158,14 @@ pub struct JobInfoResponse { impl From for JobInfoResponse { fn from(value: JobProgress) -> Self { - let error = match &value.status { - DaJobStatus::Failed { error } => Some(error.clone()), - _ => None, - }; - let created_at = value.job_id.get_timestamp().map_or(0, |ts| ts.to_unix().0); Self { job_id: value.job_id, - status: value.status.clone(), + status: value.status, created_at, last_updated: value.last_updated, sent_count: value.sent_chunks.count(), - error, + error: value.last_error, } } } diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 6b27612d4d..23050ebc83 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -152,7 +152,17 @@ impl DaJobService { Ok(active_jobs) } - /// Update job status by id + /// Save job progress + #[instrument(level = "debug", skip(self))] + pub fn upsert_job_progress(&self, progress: &mut JobProgress) -> Result<()> { + progress.last_updated = get_timestamp(); + + self.ledger_db.upsert_progress(progress)?; + + Ok(()) + } + + /// Update and save job progress to a new status #[instrument(level = "debug", skip(self))] pub fn update_job_status( &self, @@ -167,7 +177,7 @@ impl DaJobService { let db_progress = progress.clone(); self.ledger_db - .upsert_progress(&db_progress, previous_status.as_u8())?; + .upsert_progress_new_status(&db_progress, previous_status.as_u8())?; METRICS.record_status_update(&previous_status, progress); diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 226c765aa4..bfdb3990e3 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -355,6 +355,8 @@ impl BitcoinService { .await { Ok(completed) => { + progress.last_error = None; + if completed { job_service.update_job_status(progress, DaJobStatus::Completed)?; info!("Job {job_id} completed successfully"); @@ -372,17 +374,15 @@ impl BitcoinService { // Save updated progress with last sent attempt value and continue // Fee cap errors should be retried on next `process_job_service` call - job_service.update_job_status(progress, progress.status.clone())?; + job_service.upsert_job_progress(progress)?; continue; } Err(e) => { + // TODO make the distinction between recoverable and unrecoverable error. 
+ // The latter should be updated to Failed status error!("Error processing job {job_id}: {e:?}"); - job_service.update_job_status( - progress, - DaJobStatus::Failed { - error: e.to_string(), - }, - )?; + progress.last_error = Some(e.to_string()); + job_service.upsert_job_progress(progress)?; } } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index db195ec002..49df49105f 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -1038,7 +1038,20 @@ impl DaLedgerOps for LedgerDB { self.db.get::(job_id) } - fn upsert_progress(&self, progress: &JobProgress, previous_status: u8) -> anyhow::Result<()> { + fn upsert_progress(&self, progress: &JobProgress) -> anyhow::Result<()> { + let mut batch = SchemaBatch::new(); + + batch.put::(&progress.job_id, progress)?; + + self.db.write_schemas(batch)?; + Ok(()) + } + + fn upsert_progress_new_status( + &self, + progress: &JobProgress, + previous_status: u8, + ) -> anyhow::Result<()> { let mut batch = SchemaBatch::new(); let job_id = progress.job_id; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index 6360abcd4f..73ab1c8abe 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -362,8 +362,12 @@ pub trait DaLedgerOps { /// Get a DA job request by id fn get_job_request(&self, job_id: &Uuid) -> Result>; - /// Update a DA job progress by id - fn upsert_progress(&self, progress: &JobProgress, previous_status: u8) -> Result<()>; + /// Upsert a DA job progress + fn upsert_progress(&self, progress: &JobProgress) -> Result<()>; + + /// Upsert a DA job progress with a status change + fn upsert_progress_new_status(&self, progress: &JobProgress, previous_status: u8) + -> Result<()>; /// Get a DA job progress by id fn get_progress(&self, job_id: &Uuid) -> Result>; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index ded615639c..bc7f55c93c 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -77,6 +77,8 @@ pub struct JobProgress { pub sent_chunks: SentChunks, /// Last update timestamp pub last_updated: u64, + /// Last recoverable error message + pub last_error: Option, } impl JobProgress { @@ -87,6 +89,7 @@ impl JobProgress { status: DaJobStatus::Pending, sent_chunks: SentChunks::new(), last_updated, + last_error: None, } } } From ce8a04a7f93943e77e24041818fa603beb8508b1 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 5 Nov 2025 17:24:53 +0000 Subject: [PATCH 58/81] Remove da job RPCs from PROTECTED_METHODS --- crates/common/src/rpc/auth.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crates/common/src/rpc/auth.rs b/crates/common/src/rpc/auth.rs index 83361371f7..15dff2c4d5 100644 --- a/crates/common/src/rpc/auth.rs +++ b/crates/common/src/rpc/auth.rs @@ -8,13 +8,7 @@ use jsonrpsee::MethodResponse; use serde_json::value::RawValue; use serde_json::Value; -const PROTECTED_METHODS: [&str; 5] = [ - "backup_create", - "backup_validate", - "backup_info", - "da_job_cancel", - "da_job_retry", -]; +const PROTECTED_METHODS: 
[&str; 5] = ["backup_create", "backup_validate", "backup_info"]; #[derive(Debug, Clone)] pub struct Auth { From 3917bb17049e7e9126cc36d79e17dea6fa62be62 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 09:05:18 +0000 Subject: [PATCH 59/81] Cap list jobs to 100 --- crates/bitcoin-da/src/job/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 23050ebc83..be764c9eb8 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -307,7 +307,7 @@ impl DaJobRpcProvider for DaJobService { } fn list_jobs(&self, filter: JobListFilter) -> Result> { - let limit = filter.limit.unwrap_or(25).min(1000); // Defaults to 25, capped at 1000 + let limit = filter.limit.unwrap_or(25).min(100); // Defaults to 25, capped at 100 let offset = filter.offset.unwrap_or(0); // Get job ids based on status filter From 6bed148e07aa4383cdb73bfb5068cc62f6bdb35b Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 10:00:34 +0000 Subject: [PATCH 60/81] Fix array size --- crates/common/src/rpc/auth.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/common/src/rpc/auth.rs b/crates/common/src/rpc/auth.rs index 15dff2c4d5..9f75efc077 100644 --- a/crates/common/src/rpc/auth.rs +++ b/crates/common/src/rpc/auth.rs @@ -8,7 +8,7 @@ use jsonrpsee::MethodResponse; use serde_json::value::RawValue; use serde_json::Value; -const PROTECTED_METHODS: [&str; 5] = ["backup_create", "backup_validate", "backup_info"]; +const PROTECTED_METHODS: [&str; 3] = ["backup_create", "backup_validate", "backup_info"]; #[derive(Debug, Clone)] pub struct Auth { From e1aed6f7a2bfd565859d4e0f0cbd2d6907ea36cc Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 10:07:33 +0000 Subject: [PATCH 61/81] Move DaTxRequest out of state_machine --- bin/citrea/tests/bitcoin/da_job.rs | 4 ++-- bin/citrea/tests/bitcoin/da_queue.rs | 4 ++-- bin/citrea/tests/bitcoin/full_node.rs | 3 ++- bin/citrea/tests/bitcoin/light_client_test.rs | 5 ++--- .../tests/bitcoin/sequencer_commitments.rs | 3 ++- bin/citrea/tests/bitcoin/utils.rs | 4 ++-- crates/batch-prover/src/rpc.rs | 6 ++++-- crates/bitcoin-da/src/job/service.rs | 3 ++- crates/bitcoin-da/src/service.rs | 4 ++-- crates/bitcoin-da/src/test_utils.rs | 4 ++-- crates/prover-services/src/parallel.rs | 3 +-- crates/sequencer/src/commitment/service.rs | 4 ++-- .../adapters/mock-da/src/service.rs | 6 ++++-- .../full-node/db/sov-db/src/ledger_db/mod.rs | 3 ++- .../db/sov-db/src/ledger_db/traits.rs | 3 ++- .../full-node/db/sov-db/src/schema/tables.rs | 3 ++- .../rollup-interface/src/node/services/da.rs | 20 ++++++++++++++++++- .../rollup-interface/src/state_machine/da.rs | 17 ---------------- 18 files changed, 54 insertions(+), 45 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index 8352417ea8..b6f9904f39 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -24,8 +24,8 @@ use jsonrpsee::http_client::HttpClient; use reth_tasks::TaskManager; use sov_db::schema::types::da_jobs::DaJobStatus; use sov_ledger_rpc::LedgerRpcClient; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::da::SequencerCommitment; +use 
sov_rollup_interface::services::da::{DaService, DaTxRequest}; use super::get_citrea_path; use crate::bitcoin::full_node::create_serialized_fake_receipt_batch_proof_with_state_roots; diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 5d70741863..702fc6aa9d 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -14,9 +14,9 @@ use citrea_e2e::Result; use citrea_light_client_prover::rpc::LightClientProverRpcClient; use reth_tasks::TaskManager; use sov_ledger_rpc::LedgerRpcClient; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; use sov_rollup_interface::rpc::BatchProofMethodIdRpcResponse; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use super::light_client_test::create_random_state_diff; use super::{get_citrea_cli_path, get_citrea_path}; diff --git a/bin/citrea/tests/bitcoin/full_node.rs b/bin/citrea/tests/bitcoin/full_node.rs index 092edaca56..6702e6ae97 100644 --- a/bin/citrea/tests/bitcoin/full_node.rs +++ b/bin/citrea/tests/bitcoin/full_node.rs @@ -21,8 +21,9 @@ use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, ReceiptClaim}; use sov_db::schema::types::L2HeightAndIndex; use sov_ledger_rpc::LedgerRpcClient; use sov_modules_api::BatchProofCircuitOutputV3; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; use sov_rollup_interface::rpc::block::L2BlockResponse; +use sov_rollup_interface::services::da::DaTxRequest; use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use tokio::time::sleep; diff --git a/bin/citrea/tests/bitcoin/light_client_test.rs b/bin/citrea/tests/bitcoin/light_client_test.rs index 56483a3651..5762585fa1 100644 --- a/bin/citrea/tests/bitcoin/light_client_test.rs +++ b/bin/citrea/tests/bitcoin/light_client_test.rs @@ -31,11 +31,10 @@ use reth_tasks::TaskManager; use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, ReceiptClaim}; use sov_modules_api::BlobReaderTrait; use sov_rollup_interface::da::{ - BatchProofMethodId, BatchProofMethodIdBody, DaTxRequest, DaVerifier, DataOnDa, - SequencerCommitment, + BatchProofMethodId, BatchProofMethodIdBody, DaVerifier, DataOnDa, SequencerCommitment, }; use sov_rollup_interface::rpc::BatchProofMethodIdRpcResponse; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::batch_proof::output::v3::BatchProofCircuitOutputV3; use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use sov_rollup_interface::Network; diff --git a/bin/citrea/tests/bitcoin/sequencer_commitments.rs b/bin/citrea/tests/bitcoin/sequencer_commitments.rs index 77cf162bdd..c8c96f4344 100644 --- a/bin/citrea/tests/bitcoin/sequencer_commitments.rs +++ b/bin/citrea/tests/bitcoin/sequencer_commitments.rs @@ -20,8 +20,9 @@ use reth_tasks::TaskManager; use rs_merkle::algorithms::Sha256; use rs_merkle::MerkleTree; use sov_ledger_rpc::LedgerRpcClient; -use sov_rollup_interface::da::{BlobReaderTrait, DaTxRequest, DataOnDa, SequencerCommitment}; +use sov_rollup_interface::da::{BlobReaderTrait, DataOnDa, SequencerCommitment}; use sov_rollup_interface::rpc::SequencerCommitmentResponse; +use sov_rollup_interface::services::da::DaTxRequest; use tokio::time::sleep; use super::get_citrea_path; diff --git 
a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index fa6d67453d..2de453756f 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -39,11 +39,11 @@ use sov_db::rocks_db_config::RocksdbConfig; use sov_ledger_rpc::LedgerRpcClient; use sov_modules_api::BatchProofCircuitOutputV3; use sov_rollup_interface::da::{ - BatchProofMethodId, BatchProofMethodIdBody, DaTxRequest, SequencerCommitment, + BatchProofMethodId, BatchProofMethodIdBody, SequencerCommitment, SECURITY_COUNCIL_SIGNATURE_SIZE, SECURITY_COUNCIL_SIGNATURE_THRESHOLD, }; use sov_rollup_interface::rpc::{JobRpcResponse, VerifiedBatchProofResponse}; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use sov_rollup_interface::Network; use tokio::time::sleep; diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index e25549c4e9..ec22d2e575 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -30,11 +30,11 @@ use sov_db::schema::types::job_status::JobStatus; use sov_db::schema::types::{L2BlockNumber, SlotNumber}; use sov_modules_api::{BatchProofCircuitOutputV3, SpecId, Zkvm}; use sov_prover_storage_manager::ProverStorageManager; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; use sov_rollup_interface::rpc::{ BatchProofResponse, JobRpcResponse, SequencerCommitmentResponse, SequencerCommitmentRpcParam, }; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use tokio::sync::{mpsc, oneshot}; use tracing::info; @@ -706,6 +706,8 @@ where proof_path: PathBuf, output: Vec, ) -> RpcResult { + use sov_rollup_interface::services::da::DaTxRequest; + let ledger_db = &self.context.ledger_db; let proving_job_id = Uuid::now_v7(); info!("Submitting proof from file {proof_path:?} with id {proving_job_id}"); diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index be764c9eb8..2ee9b5ef0b 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -8,7 +8,8 @@ use bitcoin::Txid; use lru::LruCache; use sov_db::ledger_db::DaLedgerOps; use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress}; -use sov_rollup_interface::da::{DaTxRequest, DataOnDa}; +use sov_rollup_interface::da::DataOnDa; +use sov_rollup_interface::services::da::DaTxRequest; use tokio::sync::oneshot; use tracing::{info, instrument}; use uuid::Uuid; diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index bfdb3990e3..d229ed19db 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -30,8 +30,8 @@ use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::LedgerDB; use sov_db::schema::types::da_jobs::{DaJobStatus, JobProgress, SentChunks}; -use sov_rollup_interface::da::{DaSpec, DaTxRequest, DataOnDa, SequencerCommitment}; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::da::{DaSpec, DataOnDa, SequencerCommitment}; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::Proof; use sov_rollup_interface::Network; use tokio::select; 
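`DaTxRequest` was already gated behind the `native` feature in its old `state_machine::da` home; this patch relocates it next to the other node-facing service types, while `DataOnDa`, the enum actually serialized onto the DA layer, stays guest-visible. A minimal sketch of the gating pattern with hypothetical module and type names standing in for the rollup-interface layout (assumes a `native` feature declared in Cargo.toml and the `uuid` crate):

    // Guest-visible module: borsh-friendly types only, no node-side deps.
    pub mod state_machine {
        #[derive(Clone)]
        pub struct SequencerCommitment {
            pub index: u32,
        }
    }

    // Node-only module, compiled out of the zkVM guest entirely.
    #[cfg(feature = "native")]
    pub mod services {
        use uuid::Uuid;

        use crate::state_machine::SequencerCommitment;

        // The request type the node queues for the DA layer; it never
        // crosses into the guest, so node-only deps like Uuid are fine.
        pub enum DaTxRequest {
            SequencerCommitment(SequencerCommitment),
            StoredProof(Uuid),
        }
    }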
diff --git a/crates/bitcoin-da/src/test_utils.rs b/crates/bitcoin-da/src/test_utils.rs index fd22981703..b4fe048ba4 100644 --- a/crates/bitcoin-da/src/test_utils.rs +++ b/crates/bitcoin-da/src/test_utils.rs @@ -1,8 +1,8 @@ //! This module provides the implementation for sending separate chunk transactions with a specified fee rate. use bitcoin::hashes::Hash; -use sov_rollup_interface::da::{DaTxRequest, DataOnDa}; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::da::DataOnDa; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use uuid::Uuid; use crate::error::BitcoinServiceError; diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs index a4bb844c42..26465972a8 100644 --- a/crates/prover-services/src/parallel.rs +++ b/crates/prover-services/src/parallel.rs @@ -3,8 +3,7 @@ use std::time::Instant; use anyhow::anyhow; use rand::Rng; -use sov_rollup_interface::da::DaTxRequest; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::{Proof, ProofWithJob, ReceiptType, ZkvmHost}; use tokio::sync::{oneshot, OwnedSemaphorePermit, Semaphore}; use tracing::{debug, error, info, instrument, warn}; diff --git a/crates/sequencer/src/commitment/service.rs b/crates/sequencer/src/commitment/service.rs index 0adb93f6bd..4d89c28129 100644 --- a/crates/sequencer/src/commitment/service.rs +++ b/crates/sequencer/src/commitment/service.rs @@ -14,8 +14,8 @@ use sov_db::ledger_db::SequencerLedgerOps; use sov_db::schema::types::L2BlockNumber; use sov_modules_api::WorkingSet; use sov_prover_storage_manager::ProverStorageManager; -use sov_rollup_interface::da::{BlockHeaderTrait, DaTxRequest, SequencerCommitment}; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::da::{BlockHeaderTrait, SequencerCommitment}; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_state::ProverStorage; use tokio::select; use tokio::sync::mpsc; diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index dcf48d247b..8de414623f 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -7,9 +7,9 @@ use borsh::BorshDeserialize; use sha2::Digest; use sov_db::ledger_db::{DaLedgerOps, LedgerDB}; use sov_rollup_interface::da::{ - BlobReaderTrait, BlockHeaderTrait, DaSpec, DaTxRequest, DataOnDa, SequencerCommitment, Time, + BlobReaderTrait, BlockHeaderTrait, DaSpec, DataOnDa, SequencerCommitment, Time, }; -use sov_rollup_interface::services::da::{DaService, SlotData}; +use sov_rollup_interface::services::da::{DaService, DaTxRequest, SlotData}; use sov_rollup_interface::zk::Proof; use tokio::sync::{broadcast, oneshot, Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use tokio::time; @@ -639,6 +639,8 @@ mod tests { } mod reo4g_control { + use sov_rollup_interface::services::da::DaTxRequest; + use super::*; use crate::{MockAddress, MockDaService}; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 49df49105f..37d53e09ad 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -5,8 +5,9 @@ use std::sync::Arc; use anyhow::Context; use rocksdb::{ReadOptions, WriteBatch}; use sov_rollup_interface::block::L2Block; 
-use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; use sov_rollup_interface::fork::{Fork, ForkMigration}; +use sov_rollup_interface::services::da::DaTxRequest; use sov_rollup_interface::stf::StateDiff; use sov_rollup_interface::zk::{Proof, StorageRootHash}; use sov_schema_db::{ScanDirection, Schema, SchemaBatch, SchemaIterator, SeekKeyEncoder, DB}; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs index 73ab1c8abe..fbb3869a5f 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs @@ -4,7 +4,8 @@ use std::sync::Arc; use anyhow::Result; use sov_rollup_interface::block::L2Block; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::services::da::DaTxRequest; use sov_rollup_interface::stf::StateDiff; use sov_rollup_interface::zk::{Proof, StorageRootHash}; use sov_schema_db::SchemaIterator; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index 99b6d76049..19f4500639 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -12,7 +12,8 @@ use borsh::{BorshDeserialize, BorshSerialize}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use jmt::storage::{NibblePath, Node, NodeKey, StaleNodeIndex}; use jmt::Version; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::services::da::DaTxRequest; use sov_rollup_interface::stf::StateDiff; use sov_rollup_interface::zk::Proof; use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec}; diff --git a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs index c6ab4e1453..d4259b95b5 100644 --- a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs @@ -1,4 +1,7 @@ //! The da module defines traits used by the full node to interact with the DA layer. + +#[cfg(feature = "native")] +use borsh::{BorshDeserialize, BorshSerialize}; use serde::de::DeserializeOwned; use serde::Serialize; #[cfg(feature = "native")] @@ -8,10 +11,25 @@ use uuid::Uuid; use crate::da::BlockHeaderTrait; #[cfg(feature = "native")] -use crate::da::{DaSpec, DaTxRequest, DaVerifier, SequencerCommitment}; +use crate::da::{BatchProofMethodId, DaSpec, DaVerifier, SequencerCommitment}; #[cfg(feature = "native")] use crate::zk::Proof; +/// Transaction request to send to the DA queue. 
+#[cfg(feature = "native")] +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize)] +pub enum DaTxRequest { + /// A commitment from the sequencer + SequencerCommitment(SequencerCommitment), + /// Or a zk proof and state diff + ZKProof(Proof), + /// Or a job id for a stored proof + StoredProof(Uuid), + /// Batch proof method id update for light client + BatchProofMethodId(BatchProofMethodId), +} + /// This type represents a queued request to send_transaction #[cfg(feature = "native")] pub struct TxRequestWithNotifier { diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs index 0b299d51c1..a2e60103f8 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs @@ -6,8 +6,6 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; -#[cfg(feature = "native")] -use uuid::Uuid; use crate::zk::Proof; use crate::{BasicAddress, Network}; @@ -103,21 +101,6 @@ impl core::cmp::Ord for SequencerCommitment { } } -/// Transaction request to send to the DA queue. -#[cfg(feature = "native")] -#[allow(clippy::large_enum_variant)] -#[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize)] -pub enum DaTxRequest { - /// A commitment from the sequencer - SequencerCommitment(SequencerCommitment), - /// Or a zk proof and state diff - ZKProof(Proof), - /// Or a job id for a stored proof - StoredProof(Uuid), - /// Batch proof method id update for light client - BatchProofMethodId(BatchProofMethodId), -} - /// Data written to DA and read from DA must be the borsh serialization of this enum #[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize)] #[allow(clippy::large_enum_variant)] From 89f62b5cd47cbf49aa2a12a7f4b7cdd6e856081e Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 10:17:09 +0000 Subject: [PATCH 62/81] Add missing DaJobIdByProvingJobId to LEDGER_TABLES MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Erce Can Bektüre <47954181+ercecan@users.noreply.github.com> --- crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index 19f4500639..a3a743a33a 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -154,6 +154,7 @@ pub const LEDGER_TABLES: &[&str] = &[ CommitmentMerkleRoots::table_name(), CommitmentsByNumber::table_name(), DaTxRequestByJobId::table_name(), + DaJobIdByProvingJobId::table_name(), DaJobProgressById::table_name(), DaJobStatusIndex::table_name(), ExecutedMigrations::table_name(), From e480de73ad2993cef5e7128c79722bbe8555a943 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 16:27:17 +0000 Subject: [PATCH 63/81] Dont wait for seqcom txid and add logging --- crates/sequencer/src/commitment/service.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/sequencer/src/commitment/service.rs b/crates/sequencer/src/commitment/service.rs index 4d89c28129..6d204d44d8 100644 --- 
a/crates/sequencer/src/commitment/service.rs +++ b/crates/sequencer/src/commitment/service.rs @@ -221,7 +221,7 @@ where let tx_request = DaTxRequest::SequencerCommitment(commitment.clone()); - let (_, rx) = self + let (da_job_id, rx) = self .da_service .send_transaction(tx_request) .await @@ -237,8 +237,7 @@ where let _txid = rx .await - .map_err(|_| anyhow!("DA notification channel closed"))? // Handle RecvError - .map_err(|e| anyhow!("DA job failed: {e}"))?; + .map_err(|_| anyhow!("DA notification channel closed"))?; SM.send_commitment_execution.record( Instant::now() @@ -252,7 +251,10 @@ where ledger_db.delete_state_diff_by_range(commitment_range)?; - info!("New commitment. L2 range: #{}-{}", l2_start.0, l2_end.0); + info!( + "New commitment. L2 range: #{}-{}, index: {}, da job id {da_job_id}", + l2_start.0, l2_end.0, commitment.index + ); Ok(()) } From a99ac5bda5ae158be0a56d269091c81b6f2ba27d Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 16:28:02 +0000 Subject: [PATCH 64/81] Rename wait_for_existing_da_job --- crates/batch-prover/src/prover.rs | 2 +- crates/prover-services/src/parallel.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs index c95c34484c..81ae8a5089 100644 --- a/crates/batch-prover/src/prover.rs +++ b/crates/batch-prover/src/prover.rs @@ -851,7 +851,7 @@ where da_job_id, proving_job_id ); prover_service - .wait_for_existing_da_job(da_job_id) + .get_existing_da_job_waiter(da_job_id) .await .expect("Should recover da job receiver") } else { diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs index 0f4d07d719..fb875a6220 100644 --- a/crates/prover-services/src/parallel.rs +++ b/crates/prover-services/src/parallel.rs @@ -247,7 +247,7 @@ where } /// Used for recovery - pub async fn wait_for_existing_da_job( + pub async fn get_existing_da_job_waiter( &self, da_job_id: Uuid, ) -> Result, ::Error> { From 6e8b54725252260bd11dc2eb3b299c97d3e72907 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 16:41:48 +0000 Subject: [PATCH 65/81] Rename RPC with additional ById suffix --- crates/bitcoin-da/src/job/rpc.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index a17efa6dd3..ff239e2c20 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -201,7 +201,7 @@ pub trait DaJobRpc { /// # Errors /// * Job not found /// * Job cannot be cancelled (already completed, failed, or cancelled) - #[method(name = "cancel")] + #[method(name = "cancelById")] async fn da_job_cancel(&self, job_id: JobId) -> RpcResult; /// Retries a failed or cancelled job by creating a new job with the same data. @@ -215,7 +215,7 @@ pub trait DaJobRpc { /// # Errors /// * Job not found /// * Job is not in a retryable state (pending, in-progress, or completed) - #[method(name = "retry")] + #[method(name = "retryById")] async fn da_job_retry(&self, job_id: JobId) -> RpcResult; /// Lists jobs with optional filtering and pagination. 
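After this rename the Rust trait methods keep their old identifiers, so generated clients are unaffected, but raw JSON-RPC callers must switch to the `ById`-suffixed names. A minimal client sketch, assuming the `da_job` namespace implied by the `da_job_cancel`/`da_job_retry` entries that PROTECTED_METHODS carried earlier in the series, plus jsonrpsee with the `http-client` feature, tokio, serde_json, and uuid with `serde`; the endpoint and job id are placeholders:

    use jsonrpsee::core::client::ClientT;
    use jsonrpsee::http_client::HttpClientBuilder;
    use jsonrpsee::rpc_params;
    use uuid::Uuid;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        let client = HttpClientBuilder::default().build("http://127.0.0.1:12345")?;

        // Placeholder id of an existing failed or cancelled job.
        let job_id: Uuid = "018f9c6e-0000-7000-8000-000000000000".parse()?;

        // Was "da_job_retry"; retry returns the id of the new job.
        let new_job_id: Uuid = client
            .request("da_job_retryById", rpc_params![job_id])
            .await?;

        // Was "da_job_get"; left untyped here rather than asserting the
        // exact JobInfoResponse field layout.
        let info: serde_json::Value = client
            .request("da_job_getById", rpc_params![new_job_id])
            .await?;
        println!("retried as {new_job_id}: {info}");
        Ok(())
    }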
@@ -245,7 +245,7 @@ pub trait DaJobRpc { /// /// # Errors /// * Database error related errors - #[method(name = "get")] + #[method(name = "getById")] async fn da_job_get_info(&self, job_id: JobId) -> RpcResult; } From 8077122bfd36a479e2dcf089b8f0cf586afb50ab Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 16:45:01 +0000 Subject: [PATCH 66/81] Additional documentation for InProgress and Completed status --- .../full-node/db/sov-db/src/schema/types/da_jobs.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs index bc7f55c93c..0591a35a94 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -8,17 +8,17 @@ pub type JobId = Uuid; /// Job status representing the current state of transaction processing #[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize, PartialEq)] pub enum DaJobStatus { - /// Job is queued and waiting to be processed + /// Job is queued and waiting to be processed. Pending, - /// Job is in progress + /// Job is in progress. None or some of its txs have been sent to DA. InProgress, - /// Job completed successfully + /// Job completed successfully. All its txs have been sent to DA. Completed, - /// Job was cancelled before completion + /// Job was cancelled before completion. Cancelled, - /// Job failed with error + /// Job failed with error. Failed { - /// Error associated with the failure + /// Error associated with the failure. error: String, }, } From 600498c3d9987e2056d3a5f6ec7349dd648453bc Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 10 Nov 2025 17:05:59 +0000 Subject: [PATCH 67/81] Don't go through file for test helper method --- bin/citrea/tests/bitcoin/da_job.rs | 5 +---- crates/batch-prover/src/rpc.rs | 24 ++++++------------------ 2 files changed, 7 insertions(+), 22 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index 53b2d4359f..245a62f5a1 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -682,11 +682,8 @@ impl BatchProverRecoveryJobServiceTest { None, ); - let mut tempfile = tempfile::NamedTempFile::new().unwrap(); - tempfile.write_all(&proof).unwrap(); - let job_id = batch_prover_client - .submit_proof_from_file(tempfile.path().to_path_buf(), output) + .submit_proof_with_output(proof, output) .await?; wait_for_prover_job_count(batch_prover, 1, None).await?; diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index ec22d2e575..eb71569d76 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -278,17 +278,16 @@ pub trait BatchProverRpc { #[method(name = "retryProvingJob")] async fn retry_proving_job(&self, job_id: Uuid) -> RpcResult; - /// Submit a proof with output. Only available with `testing` feature. 
/// /// # Arguments - /// * `proof_path` - Path to the serialized proof file to submit + /// * `proof` - Serialized proof /// * `output` - Serialized `BatchProofCircuitOutput` /// /// # Returns /// The bitcoin-da job id #[method(name = "submitProofFromFile")] - async fn submit_proof_from_file(&self, proof_path: PathBuf, output: Vec) - -> RpcResult; + async fn submit_proof_with_output(&self, proof: Vec, output: Vec) -> RpcResult; } /// Server implementation of the Batch Prover RPC interface @@ -692,28 +691,17 @@ where } #[cfg(not(feature = "testing"))] - async fn submit_proof_from_file( - &self, - _proof_path: PathBuf, - _output: Vec, - ) -> RpcResult { + async fn submit_proof_with_output(&self, _proof: Vec, _output: Vec) -> RpcResult { Err(internal_rpc_error("Unsupported test method")) } #[cfg(feature = "testing")] - async fn submit_proof_from_file( - &self, - proof_path: PathBuf, - output: Vec, - ) -> RpcResult { + async fn submit_proof_with_output(&self, proof: Vec, output: Vec) -> RpcResult { use sov_rollup_interface::services::da::DaTxRequest; let ledger_db = &self.context.ledger_db; let proving_job_id = Uuid::now_v7(); - info!("Submitting proof from file {proof_path:?} with id {proving_job_id}"); - - let proof = fs::read(&proof_path) - .map_err(|e| internal_rpc_error(format!("Failed to read proof file: {e}")))?; + info!("Submitting proof with id {proving_job_id}"); let output: BatchProofCircuitOutput = borsh::from_slice(&output).unwrap(); From 84e05d948c17869cd2eed0e129e1f07f398f0a1a Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 11 Nov 2025 10:04:24 +0000 Subject: [PATCH 68/81] Lint --- bin/citrea/tests/bitcoin/da_job.rs | 1 - crates/batch-prover/src/rpc.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs index 245a62f5a1..580fd577a0 100644 --- a/bin/citrea/tests/bitcoin/da_job.rs +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -1,4 +1,3 @@ -use std::io::Write; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index eb71569d76..c01fe0995e 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use std::fmt::Debug; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use std::{env, fs}; From bd0fdb8f4bd14a3a85b303c2328ee40d1b81d0a6 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 11 Nov 2025 10:10:17 +0000 Subject: [PATCH 69/81] Rename SentChunks to SentTxs to better reflect Complete tx tracking --- crates/bitcoin-da/src/fee.rs | 8 +++---- crates/bitcoin-da/src/job/metrics.rs | 2 +- crates/bitcoin-da/src/job/rpc.rs | 2 +- crates/bitcoin-da/src/job/service.rs | 18 ++++----------- crates/bitcoin-da/src/service.rs | 18 +++++++-------- .../db/sov-db/src/schema/types/da_jobs.rs | 22 +++++++++---------- 6 files changed, 30 insertions(+), 40 deletions(-) diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs index b0816a2672..b530d6ebd5 100644 --- a/crates/bitcoin-da/src/fee.rs +++ b/crates/bitcoin-da/src/fee.rs @@ -11,7 +11,7 @@ use bitcoincore_rpc::json::{ BumpFeeResult, CreateRawTransactionInput, EstimateMode, WalletCreateFundedPsbtOptions, }; use bitcoincore_rpc::{Client, RpcApi}; -use sov_db::schema::types::da_jobs::SentChunks; +use 
sov_db::schema::types::da_jobs::SentTxs; use thiserror::Error; use tracing::{debug, instrument, trace, warn}; @@ -238,7 +238,7 @@ impl FeeService { pub(crate) async fn validate_txs_fee_rate( &self, txs: &[SignedTxPair], - sent_chunks: &SentChunks, + sent_txs: &SentTxs, fee_rate: u64, utxos: Vec, prev_utxo: Option, @@ -256,7 +256,7 @@ impl FeeService { // Recover sent chunks let mut commit_txs = vec![]; - for tx in &sent_chunks.commit_txs { + for tx in &sent_txs.commit { let id = Txid::from_byte_array(*tx); let tx = self .client @@ -266,7 +266,7 @@ impl FeeService { commit_txs.push(TxWithId { tx, id }); } let mut reveal_txs = vec![]; - for tx in &sent_chunks.reveal_txs { + for tx in &sent_txs.reveal { let id = Txid::from_byte_array(*tx); let tx = self .client diff --git a/crates/bitcoin-da/src/job/metrics.rs b/crates/bitcoin-da/src/job/metrics.rs index 70054dd920..0b99e7e65c 100644 --- a/crates/bitcoin-da/src/job/metrics.rs +++ b/crates/bitcoin-da/src/job/metrics.rs @@ -89,7 +89,7 @@ impl DaJobMetrics { // Record total chunks sent self.job_chunks_sent - .record(progress.sent_chunks.count() as f64); + .record(progress.sent_txs.count() as f64); } DaJobStatus::Cancelled => { self.jobs_cancelled.increment(1.0); diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs index ff239e2c20..65c12b0c4d 100644 --- a/crates/bitcoin-da/src/job/rpc.rs +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -164,7 +164,7 @@ impl From for JobInfoResponse { status: value.status, created_at, last_updated: value.last_updated, - sent_count: value.sent_chunks.count(), + sent_count: value.sent_txs.count(), error: value.last_error, } } diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 2ee9b5ef0b..b4da23e3c2 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -198,22 +198,12 @@ impl DaJobService { for job_id in active_job_ids { if let Some(JobProgress { status: DaJobStatus::InProgress, - sent_chunks, + sent_txs, .. }) = self.get_progress(&job_id)? 
{ - txids.extend( - sent_chunks - .commit_txs - .into_iter() - .map(Txid::from_byte_array), - ); - txids.extend( - sent_chunks - .reveal_txs - .into_iter() - .map(Txid::from_byte_array), - ); + txids.extend(sent_txs.commit.into_iter().map(Txid::from_byte_array)); + txids.extend(sent_txs.reveal.into_iter().map(Txid::from_byte_array)); } } @@ -232,7 +222,7 @@ impl DaJobService { fn notify_new_status(&self, job_id: JobId, progress: &JobProgress) { let result = match &progress.status { DaJobStatus::Completed => { - if let Some(last_tx) = progress.sent_chunks.reveal_txs.last() { + if let Some(last_tx) = progress.sent_txs.reveal.last() { Ok(TxidWrapper(Txid::from_byte_array(*last_tx))) } else { Err(JobServiceError::NoTransactionsFound(job_id).into()) diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 04ed98afe8..4f6315b6e8 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -29,7 +29,7 @@ use lru::LruCache; use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::LedgerDB; -use sov_db::schema::types::da_jobs::{DaJobStatus, JobProgress, SentChunks}; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobProgress, SentTxs}; use sov_rollup_interface::da::{DaSpec, DataOnDa, SequencerCommitment}; use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::Proof; @@ -463,11 +463,11 @@ impl BitcoinService { utxos.clone(), prev_utxo.clone(), job_data, - progress.sent_chunks.clone(), + progress.sent_txs.clone(), ) .await?; - let current_idx = progress.sent_chunks.count(); + let current_idx = progress.sent_txs.count(); let signed_txs = self .tx_signer .sign_da_txs(da_txs.clone(), current_idx) @@ -479,7 +479,7 @@ impl BitcoinService { self.fee .validate_txs_fee_rate( &signed_txs, - &progress.sent_chunks, + &progress.sent_txs, fee_sat_per_vbyte, utxos, prev_utxo, @@ -505,7 +505,7 @@ impl BitcoinService { sent_count += 1; txids.extend(&ids); - progress.sent_chunks.extend( + progress.sent_txs.extend( vec![signed_tx.commit.tx.compute_txid().to_byte_array()], vec![signed_tx.reveal.tx.compute_txid().to_byte_array()], ); @@ -650,7 +650,7 @@ impl BitcoinService { utxos: Vec, prev_utxo: Option, data: RawTxData, - sent_chunks: SentChunks, + sent_txs: SentTxs, ) -> Result { let network = self.network; let da_private_key = self.da_private_key.expect("No private key set"); @@ -664,7 +664,7 @@ impl BitcoinService { let prefix = self.reveal_tx_prefix.clone(); let mut previous_commit_chunks = Vec::new(); - for txid in &sent_chunks.commit_txs { + for txid in &sent_txs.commit { let txid = Txid::from_byte_array(*txid); previous_commit_chunks.push( self.client @@ -675,7 +675,7 @@ impl BitcoinService { } let mut previous_reveal_chunks = Vec::new(); - for txid in &sent_chunks.reveal_txs { + for txid in &sent_txs.reveal { let txid = Txid::from_byte_array(*txid); previous_reveal_chunks.push( self.client @@ -1415,7 +1415,7 @@ impl DaService for BitcoinService { match progress.status { DaJobStatus::Completed => { // Job already finished before we subscribed - if let Some(last_tx) = progress.sent_chunks.reveal_txs.last() { + if let Some(last_tx) = progress.sent_txs.reveal.last() { let _ = tx.send(Ok(TxidWrapper(Txid::from_byte_array(*last_tx)))); } else { let _ = tx.send(Err(JobServiceError::NoTransactionsFound(job_id).into())); diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs 
index 0591a35a94..0451cc5b56 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs
@@ -38,26 +38,26 @@ impl DaJobStatus {

 /// Track sent chunks for partial sending and recovery
 #[derive(Debug, Default, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
-pub struct SentChunks {
+pub struct SentTxs {
     /// Sent commit txids
-    pub commit_txs: Vec<[u8; 32]>,
+    pub commit: Vec<[u8; 32]>,
     /// Sent reveal txids
-    pub reveal_txs: Vec<[u8; 32]>,
+    pub reveal: Vec<[u8; 32]>,
 }

-impl SentChunks {
+impl SentTxs {
     /// Number of sent commit/reveal pairs
     pub fn count(&self) -> usize {
-        self.reveal_txs.len()
+        self.reveal.len()
     }

     /// Extend with sent commit and reveal chunks
     pub fn extend(&mut self, commits: Vec<[u8; 32]>, reveals: Vec<[u8; 32]>) {
-        self.commit_txs.extend(commits);
-        self.reveal_txs.extend(reveals);
+        self.commit.extend(commits);
+        self.reveal.extend(reveals);
     }

-    /// Return a default SentChunk with empty vectors
+    /// Return a default SentTxs with empty vectors
     pub fn new() -> Self {
         Self::default()
     }
@@ -73,8 +73,8 @@ pub struct JobProgress {
     pub job_id: JobId,
     /// Current job status
     pub status: DaJobStatus,
-    /// Partially sent commit/reveal chunks for partial sending and recovery
-    pub sent_chunks: SentChunks,
+    /// Sent commit/reveal txs for tracking, partial sending and recovery
+    pub sent_txs: SentTxs,
     /// Last update timestamp
     pub last_updated: u64,
     /// Last recoverable error message
@@ -87,7 +87,7 @@ impl JobProgress {
         Self {
             job_id,
             status: DaJobStatus::Pending,
-            sent_chunks: SentChunks::new(),
+            sent_txs: SentTxs::new(),
             last_updated,
             last_error: None,
         }

From d308b7cd4a8796aa8146b4e55eed782c37f0dc1e Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 11 Nov 2025 11:00:42 +0000
Subject: [PATCH 70/81] Bump max_rebroadcast_attempts for test_queue_da_transactions_oldest_mode

---
 bin/citrea/tests/bitcoin/utils.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs
index bdd5174626..57534a6e0f 100644
--- a/bin/citrea/tests/bitcoin/utils.rs
+++ b/bin/citrea/tests/bitcoin/utils.rs
@@ -260,7 +260,7 @@ pub async fn spawn_bitcoin_da_service(
                 check_interval: 1,
                 history_limit: 1_000,
                 max_history_size: 200_000_000,
-                max_rebroadcast_attempts: 5,
+                max_rebroadcast_attempts: 50,
                 rebroadcast_delay: 1,
             }),
             mempool_space_url: None,

From c31ef6df2fc3b96e50b9cc08b9f7e384d1874f79 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 11 Nov 2025 11:14:29 +0000
Subject: [PATCH 71/81] Move recover job logic

---
 crates/bitcoin-da/src/job/service.rs | 36 ++++++++++++++++++++++++++++
 crates/bitcoin-da/src/service.rs     | 34 +++-----------------------
 2 files changed, 39 insertions(+), 31 deletions(-)

diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs
index b4da23e3c2..4156306448 100644
--- a/crates/bitcoin-da/src/job/service.rs
+++ b/crates/bitcoin-da/src/job/service.rs
@@ -247,6 +247,42 @@ impl DaJobService {
     ) {
         self.job_waiters.lock().unwrap().insert(job_id, waiter);
     }
+
+    pub(crate) fn recover_job(
+        &self,
+        job_id: Uuid,
+    ) -> Result>> {
+        let progress = self
+            .get_progress(&job_id)?
+ .ok_or(JobServiceError::JobNotFound(job_id))?; + + let (tx, rx) = oneshot::channel(); + + match progress.status { + DaJobStatus::Completed => { + // Job already finished before we subscribed + if let Some(last_tx) = progress.sent_txs.reveal.last() { + let _ = tx.send(Ok(TxidWrapper(Txid::from_byte_array(*last_tx)))); + } else { + let _ = tx.send(Err(JobServiceError::NoTransactionsFound(job_id).into())); + } + } + DaJobStatus::Failed { error } => { + // Job already failed + let _ = tx.send(Err(JobServiceError::JobFailed(job_id, error).into())); + } + DaJobStatus::Cancelled => { + // Job already cancelled + let _ = tx.send(Err(JobServiceError::JobCancelled(job_id).into())); + } + DaJobStatus::Pending | DaJobStatus::InProgress => { + // Job still running, register for notification + self.insert_waiter(job_id, tx); + } + } + + Ok(rx) + } } /// Implementation of RPC provider methods diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 4f6315b6e8..fb6cf96005 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -1403,39 +1403,11 @@ impl DaService for BitcoinService { &self, job_id: Uuid, ) -> Result>> { - let progress = self - .job_service + self.job_service .lock() .await - .get_progress(&job_id)? - .ok_or(JobServiceError::JobNotFound(job_id))?; - - let (tx, rx) = oneshot::channel(); - - match progress.status { - DaJobStatus::Completed => { - // Job already finished before we subscribed - if let Some(last_tx) = progress.sent_txs.reveal.last() { - let _ = tx.send(Ok(TxidWrapper(Txid::from_byte_array(*last_tx)))); - } else { - let _ = tx.send(Err(JobServiceError::NoTransactionsFound(job_id).into())); - } - } - DaJobStatus::Failed { error } => { - // Job already failed - let _ = tx.send(Err(JobServiceError::JobFailed(job_id, error).into())); - } - DaJobStatus::Cancelled => { - // Job already cancelled - let _ = tx.send(Err(JobServiceError::JobCancelled(job_id).into())); - } - DaJobStatus::Pending | DaJobStatus::InProgress => { - // Job still running, register for notification - self.job_service.lock().await.insert_waiter(job_id, tx); - } - } - - Ok(rx) + .recover_job(job_id) + .map_err(Into::into) } #[instrument(level = "trace", skip(self))] From d1130f17f21e3ef8fbeba468957e09366ffb8231 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Tue, 11 Nov 2025 12:00:27 +0000 Subject: [PATCH 72/81] Extract fee validation logic from process_job --- crates/bitcoin-da/src/job/mod.rs | 3 ++ crates/bitcoin-da/src/job/utils.rs | 14 ++++++ crates/bitcoin-da/src/service.rs | 80 +++++++++++++++--------------- 3 files changed, 56 insertions(+), 41 deletions(-) create mode 100644 crates/bitcoin-da/src/job/utils.rs diff --git a/crates/bitcoin-da/src/job/mod.rs b/crates/bitcoin-da/src/job/mod.rs index 0695397b5f..bd35726e9c 100644 --- a/crates/bitcoin-da/src/job/mod.rs +++ b/crates/bitcoin-da/src/job/mod.rs @@ -15,6 +15,9 @@ pub mod rpc; /// Core job queue implementation and state management pub mod service; +/// Job related utility methods +pub mod utils; + /// Job related metrics mod metrics; diff --git a/crates/bitcoin-da/src/job/utils.rs b/crates/bitcoin-da/src/job/utils.rs new file mode 100644 index 0000000000..add3dac940 --- /dev/null +++ b/crates/bitcoin-da/src/job/utils.rs @@ -0,0 +1,14 @@ +use sov_db::schema::types::da_jobs::JobProgress; + +use crate::helpers::get_timestamp; + +/// Calculates elapsed time since job creation using job uuidv7 +pub fn get_job_elapsed_time(progress: &JobProgress) -> u64 { + 
let job_created_at = progress + .job_id + .get_timestamp() + .map(|ts| ts.to_unix().0) + .unwrap_or(0); + + get_timestamp().saturating_sub(job_created_at) +} diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index fb6cf96005..4788ff2310 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -47,9 +47,10 @@ use crate::helpers::builders::body_builders::{create_inscription_transactions, D use crate::helpers::builders::TxWithId; use crate::helpers::merkle_tree::BitcoinMerkleTree; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed}; -use crate::helpers::{get_timestamp, merkle_tree, TransactionKind}; +use crate::helpers::{merkle_tree, TransactionKind}; use crate::job::error::JobServiceError; use crate::job::service::DaJobService; +use crate::job::utils::get_job_elapsed_time; use crate::metrics::BITCOIN_DA_METRICS as BM; use crate::monitoring::{MonitoredTxKind, MonitoringConfig, MonitoringService, TxStatus}; use crate::network_constants::NetworkConstants; @@ -404,47 +405,9 @@ impl BitcoinService { // Get current fee rate as sat/vb let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; - let current_time = get_timestamp(); - - let job_created_at = progress - .job_id - .get_timestamp() - .map(|ts| ts.to_unix().0) - .unwrap_or(0); - - let elapsed_secs = current_time.saturating_sub(job_created_at); - - // Cap fee at self.max_fee_rate_sat_to_pay for a maximum of `self.fee_rate_cap_duration_secs`. - // If `self.fee_rate_cap_duration_secs` is exceeded, send transaction with fee rate above `self.max_fee_rate_sat_to_pay` anyway - if fee_sat_per_vbyte > self.max_fee_rate_sat_to_pay { - if elapsed_secs < self.fee_rate_cap_duration_secs { - warn!( - "Job {} fee rate {} sat/vb exceeds cap of {} sat/vb. \ - Waiting (elapsed: {}s / max: {}s)", - progress.job_id, - fee_sat_per_vbyte, - self.max_fee_rate_sat_to_pay, - elapsed_secs, - self.fee_rate_cap_duration_secs - ); - - return Err(BitcoinServiceError::FeeCapExceeded { - current_rate: fee_sat_per_vbyte, - max_rate: self.max_fee_rate_sat_to_pay, - elapsed_secs, - max_duration_secs: self.fee_rate_cap_duration_secs, - }); - } - warn!( - "Job {} fee rate {} sat/vb exceeds cap of {} sat/vb, \ - but cap duration of {}s exceeded. Sending anyway", - progress.job_id, - fee_sat_per_vbyte, - self.max_fee_rate_sat_to_pay, - self.fee_rate_cap_duration_secs - ); - } + // Validate fee rate against cap + self.validate_fee_rate(progress, fee_sat_per_vbyte)?; // get all available utxos let utxos = self.get_utxos(sent_txids).await?; @@ -531,6 +494,41 @@ impl BitcoinService { Ok(completed) } + /// Validates fee rate against `max_fee_rate_sat_to_pay` + fn validate_fee_rate(&self, progress: &JobProgress, fee_sat_per_vbyte: u64) -> Result<()> { + if fee_sat_per_vbyte <= self.max_fee_rate_sat_to_pay { + return Ok(()); + } + + let elapsed_secs = get_job_elapsed_time(progress); + + if elapsed_secs < self.fee_rate_cap_duration_secs { + warn!( + "Job {} fee rate {} sat/vb exceeds cap of {} sat/vb. \ + Waiting (elapsed: {}s / max: {}s)", + progress.job_id, + fee_sat_per_vbyte, + self.max_fee_rate_sat_to_pay, + elapsed_secs, + self.fee_rate_cap_duration_secs + ); + + return Err(BitcoinServiceError::FeeCapExceeded { + current_rate: fee_sat_per_vbyte, + max_rate: self.max_fee_rate_sat_to_pay, + elapsed_secs, + max_duration_secs: self.fee_rate_cap_duration_secs, + }); + } + + warn!( + "Job {} fee rate {} sat/vb exceeds cap but cap duration of {}s exceeded. 
Sending anyway",
            progress.job_id, fee_sat_per_vbyte, self.fee_rate_cap_duration_secs
        );

        Ok(())
    }

     async fn select_prev_utxo(
         &self,
         utxos: &[UTXO],

From a8b7768488a7a40aa84e73f7efe385c54679ac96 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 12 Nov 2025 09:52:41 +0000
Subject: [PATCH 73/81] Fix lint

---
 bin/citrea/tests/bitcoin/utils.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs
index 57534a6e0f..da954db157 100644
--- a/bin/citrea/tests/bitcoin/utils.rs
+++ b/bin/citrea/tests/bitcoin/utils.rs
@@ -190,6 +190,7 @@ pub async fn spawn_bitcoin_da_prover_service_with_rpc_server(
         timeout: 30,
         enable_js_tracer: true,
         api_key: None,
+        ..Default::default()
     };

     // Add da rpc and da job rpc methods

From 04205996fecd2119d27c4e8e85a8cfee91be369b Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 12 Nov 2025 13:42:33 +0000
Subject: [PATCH 74/81] Streamline transaction recovery

---
 crates/bitcoin-da/src/fee.rs                      | 41 ++--------
 .../src/helpers/builders/body_builders.rs         | 18 ++---
 crates/bitcoin-da/src/service.rs                  | 76 ++++++++++++-------
 3 files changed, 66 insertions(+), 69 deletions(-)

diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs
index b530d6ebd5..774f12e381 100644
--- a/crates/bitcoin-da/src/fee.rs
+++ b/crates/bitcoin-da/src/fee.rs
@@ -5,18 +5,15 @@ use std::collections::HashMap;
 use std::sync::Arc;
 use std::time::Duration;

-use bitcoin::hashes::Hash;
-use bitcoin::{Amount, Network, Sequence, Txid};
+use bitcoin::{Amount, Network, Sequence, Transaction, Txid};
 use bitcoincore_rpc::json::{
     BumpFeeResult, CreateRawTransactionInput, EstimateMode, WalletCreateFundedPsbtOptions,
 };
 use bitcoincore_rpc::{Client, RpcApi};
-use sov_db::schema::types::da_jobs::SentTxs;
 use thiserror::Error;
 use tracing::{debug, instrument, trace, warn};

 use crate::error::BitcoinServiceError;
-use crate::helpers::builders::TxWithId;
 use crate::monitoring::{MonitoredTx, MonitoredTxKind};
 use crate::spec::utxo::UTXO;
 use crate::tx_signer::SignedTxPair;
@@ -238,7 +235,8 @@ impl FeeService {
     pub(crate) async fn validate_txs_fee_rate(
         &self,
         txs: &[SignedTxPair],
-        sent_txs: &SentTxs,
+        sent_commits: &[Transaction],
+        sent_reveals: &[Transaction],
         fee_rate: u64,
         utxos: Vec<UTXO>,
         prev_utxo: Option<UTXO>,
@@ -254,43 +252,20 @@
             );
         }

-        // Recover sent chunks
-        let mut commit_txs = vec![];
-        for tx in &sent_txs.commit {
-            let id = Txid::from_byte_array(*tx);
-            let tx = self
-                .client
-                .get_transaction(&id, None)
-                .await?
- .transaction()?; - reveal_txs.push(TxWithId { tx, id }); - } - // Add sent chunks as available inputs - let get_tx_outputs = |txs: &[TxWithId]| { + let get_tx_outputs = |txs: &[Transaction]| { txs.iter() .flat_map(|tx| { - let txid = tx.id; - tx.tx - .output + let txid = tx.compute_txid(); + tx.output .iter() .enumerate() .map(move |(idx, out)| ((txid, idx as u32), out.value)) }) .collect::>() }; - utxo_map.extend(get_tx_outputs(&commit_txs)); - utxo_map.extend(get_tx_outputs(&reveal_txs)); + utxo_map.extend(get_tx_outputs(sent_commits)); + utxo_map.extend(get_tx_outputs(sent_reveals)); for tx in txs { // Validate commit diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs index c96bf5ac4d..355b769ad5 100644 --- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs +++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs @@ -98,8 +98,8 @@ impl DaTxs { #[instrument(level = "trace", skip_all, err)] pub fn create_inscription_transactions( data: RawTxData, - previous_commit_chunks: Vec, - previous_reveal_chunks: Vec, + sent_commits: Vec, + sent_reveals: Vec, da_private_key: SecretKey, prev_utxo: Option, utxos: Vec, @@ -131,8 +131,8 @@ pub fn create_inscription_transactions( reveal_fee_rate, network, &reveal_tx_prefix, - previous_commit_chunks, - previous_reveal_chunks, + sent_commits, + sent_reveals, ), RawTxData::BatchProofMethodId(body) => create_inscription_type_3( body, @@ -344,16 +344,16 @@ pub fn create_inscription_type_1( reveal_fee_rate: u64, network: Network, reveal_tx_prefix: &[u8], - previous_commit_chunks: Vec, - previous_reveal_chunks: Vec, + sent_commits: Vec, + sent_reveals: Vec, ) -> Result { // Create reveal key let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key); let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair); - let current_idx = previous_commit_chunks.len(); - let mut commit_chunks = previous_commit_chunks; - let mut reveal_chunks = previous_reveal_chunks; + let current_idx = sent_commits.len(); + let mut commit_chunks = sent_commits; + let mut reveal_chunks = sent_reveals; if let Some(reveal_tx) = reveal_chunks.last() { prev_utxo = Some(UTXO { diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 4788ff2310..8e66db0646 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -420,13 +420,18 @@ impl BitcoinService { } }; + // Recover sent commits and sent reveals from their txids + let (sent_commits, sent_reveals) = + self.recover_sent_transactions(&progress.sent_txs).await?; + let da_txs = self .create_da_transactions_with_fee_rate( fee_sat_per_vbyte, utxos.clone(), prev_utxo.clone(), job_data, - progress.sent_txs.clone(), + sent_commits.clone(), + sent_reveals.clone(), ) .await?; @@ -442,7 +447,8 @@ impl BitcoinService { self.fee .validate_txs_fee_rate( &signed_txs, - &progress.sent_txs, + &sent_commits, + &sent_reveals, fee_sat_per_vbyte, utxos, prev_utxo, @@ -648,7 +654,8 @@ impl BitcoinService { utxos: Vec, prev_utxo: Option, data: RawTxData, - sent_txs: SentTxs, + sent_commits: Vec, + sent_reveals: Vec, ) -> Result { let network = self.network; let da_private_key = self.da_private_key.expect("No private key set"); @@ -661,35 +668,13 @@ impl BitcoinService { let prefix = self.reveal_tx_prefix.clone(); - let mut previous_commit_chunks = Vec::new(); - for txid in &sent_txs.commit { - let txid = Txid::from_byte_array(*txid); - previous_commit_chunks.push( - self.client - 
.get_transaction(&txid, None) - .await? - .transaction()?, - ) - } - - let mut previous_reveal_chunks = Vec::new(); - for txid in &sent_txs.reveal { - let txid = Txid::from_byte_array(*txid); - previous_reveal_chunks.push( - self.client - .get_transaction(&txid, None) - .await? - .transaction()?, - ) - } - tokio::task::spawn_blocking(move || { // Since this is CPU bound work, we use spawn_blocking // to release the tokio runtime execution create_inscription_transactions( data, - previous_commit_chunks, - previous_reveal_chunks, + sent_commits, + sent_reveals, da_private_key, prev_utxo, utxos, @@ -935,6 +920,43 @@ impl BitcoinService { }), } } + + /// Recover transaction from `SentTxs` txids + /// 1. Try first through monitoring service. + /// 2. If not found via monitoring service - following a restart - falls back to `get_transaction` RPC. + async fn recover_sent_transactions( + &self, + sent_txs: &SentTxs, + ) -> Result<(Vec, Vec)> { + let monitored_txs = self.monitoring.get_monitored_txs().await; + let recover_transaction = async |txid: &Txid| { + let tx = if let Some(monitored) = monitored_txs.get(txid).cloned() { + monitored.tx + } else { + self.client + .get_transaction(txid, None) + .await? + .transaction()? + }; + Ok::(tx) + }; + + let mut commits = Vec::new(); + for txid in &sent_txs.commit { + let txid = Txid::from_byte_array(*txid); + let transaction = recover_transaction(&txid).await?; + commits.push(transaction) + } + + let mut reveals = Vec::new(); + for txid in &sent_txs.reveal { + let txid = Txid::from_byte_array(*txid); + let transaction = recover_transaction(&txid).await?; + reveals.push(transaction) + } + + Ok((commits, reveals)) + } } #[async_trait] From b43e023bde319693c7f87d89f99880efeccb62df Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 12 Nov 2025 14:25:32 +0000 Subject: [PATCH 75/81] Get rid of Pending status --- crates/bitcoin-da/src/monitoring.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/bitcoin-da/src/monitoring.rs b/crates/bitcoin-da/src/monitoring.rs index 2d7309dd00..bf9f0ab2d9 100644 --- a/crates/bitcoin-da/src/monitoring.rs +++ b/crates/bitcoin-da/src/monitoring.rs @@ -37,8 +37,6 @@ const REBROADCAST_EACH_N_BLOCK: u64 = 1; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum TxStatus { - /// Pending status updated - Pending, /// Tx in mempool #[serde(rename_all = "camelCase")] InMempool { @@ -495,9 +493,7 @@ impl MonitoringService { self.total_size .fetch_add(tx.tx.total_size(), Ordering::SeqCst); - let status = self - .determine_tx_status(&tx_result, &TxStatus::Pending) - .await?; + let status = self.determine_tx_status(&tx_result, None).await?; let monitored_tx = MonitoredTx { tx: tx.tx, txid, @@ -535,7 +531,7 @@ impl MonitoringService { self.total_size.fetch_add(tx.total_size(), Ordering::SeqCst); let status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; let new_tx = MonitoredTx { @@ -626,7 +622,9 @@ impl MonitoringService { if let TxStatus::Confirmed { confirmations, .. } = tx.status { if confirmations <= depth { let tx_result = self.client.get_transaction(txid, None).await?; - tx.status = self.determine_tx_status(&tx_result, &tx.status).await?; + tx.status = self + .determine_tx_status(&tx_result, Some(&tx.status)) + .await?; if let TxStatus::InMempool { .. 
} = tx.status { info!("Rebroadcasting tx {} {tx:?}", tx.tx.compute_txid()); @@ -649,7 +647,7 @@ impl MonitoringService { TxStatus::Confirmed { .. } | TxStatus::Replaced { .. } => { if let Ok(tx_result) = self.client.get_transaction(txid, None).await { let new_status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; monitored_tx.status = new_status; @@ -662,14 +660,14 @@ impl MonitoringService { } if *rebroadcast_attempts > 0 => { let tx_result = self.client.get_transaction(txid, None).await?; let new_status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; monitored_tx.status = new_status; } TxStatus::InMempool { height, .. } => { let tx_result = self.client.get_transaction(txid, None).await?; let new_status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; // If status is still InMempool, check for how many block it has been in mempool and rebroadcast every REBROADCAST_EACH_N_BLOCK @@ -694,7 +692,7 @@ impl MonitoringService { async fn determine_tx_status( &self, tx_result: &GetTransactionResult, - current_status: &TxStatus, + current_status: Option<&TxStatus>, ) -> Result { let confirmations = tx_result.info.confirmations as u64; let status = if confirmations > 0 { @@ -735,7 +733,7 @@ impl MonitoringService { // Tx not found in mempool Err(_) => match current_status { // If transaction is queued or evicted, keep status as is - TxStatus::Evicted { .. } => current_status.clone(), + Some(status @ TxStatus::Evicted { .. }) => status.clone(), // If transaction was previously in mempool or confirmed, re-org happened and it got evicted from mempool _ => { tracing::info!("Tx {} was evicted from mempool.", tx_result.info.txid); @@ -904,7 +902,9 @@ impl MonitoringService { for txid in txids { if let Some(entry) = monitored_txs.get_mut(txid) { if let Ok(tx_result) = self.client.get_transaction(txid, None).await { - entry.status = self.determine_tx_status(&tx_result, &entry.status).await?; + entry.status = self + .determine_tx_status(&tx_result, Some(&entry.status)) + .await?; entry.last_checked = get_timestamp(); entry.address = tx_result .details From 08ae23908370eb59af11d9bb70b818bfd6bee6f5 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Thu, 13 Nov 2025 17:05:58 +0000 Subject: [PATCH 76/81] Limit data passed down to get_job_elapsed_time --- crates/bitcoin-da/src/job/utils.rs | 10 +++------- crates/bitcoin-da/src/service.rs | 24 ++++++++++-------------- 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/crates/bitcoin-da/src/job/utils.rs b/crates/bitcoin-da/src/job/utils.rs index add3dac940..fcadf749d7 100644 --- a/crates/bitcoin-da/src/job/utils.rs +++ b/crates/bitcoin-da/src/job/utils.rs @@ -1,14 +1,10 @@ -use sov_db::schema::types::da_jobs::JobProgress; +use sov_db::schema::types::da_jobs::JobId; use crate::helpers::get_timestamp; /// Calculates elapsed time since job creation using job uuidv7 -pub fn get_job_elapsed_time(progress: &JobProgress) -> u64 { - let job_created_at = progress - .job_id - .get_timestamp() - .map(|ts| ts.to_unix().0) - .unwrap_or(0); +pub fn get_job_elapsed_time(job_id: JobId) -> u64 { + let job_created_at = job_id.get_timestamp().map(|ts| ts.to_unix().0).unwrap_or(0); get_timestamp().saturating_sub(job_created_at) } diff --git a/crates/bitcoin-da/src/service.rs 
b/crates/bitcoin-da/src/service.rs index 8e66db0646..2df7456630 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -29,7 +29,7 @@ use lru::LruCache; use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; use sov_db::ledger_db::LedgerDB; -use sov_db::schema::types::da_jobs::{DaJobStatus, JobProgress, SentTxs}; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress, SentTxs}; use sov_rollup_interface::da::{DaSpec, DataOnDa, SequencerCommitment}; use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::Proof; @@ -407,7 +407,7 @@ impl BitcoinService { let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; // Validate fee rate against cap - self.validate_fee_rate(progress, fee_sat_per_vbyte)?; + self.validate_fee_rate(progress.job_id, fee_sat_per_vbyte)?; // get all available utxos let utxos = self.get_utxos(sent_txids).await?; @@ -501,22 +501,18 @@ impl BitcoinService { } /// Validates fee rate against `max_fee_rate_sat_to_pay` - fn validate_fee_rate(&self, progress: &JobProgress, fee_sat_per_vbyte: u64) -> Result<()> { + fn validate_fee_rate(&self, job_id: JobId, fee_sat_per_vbyte: u64) -> Result<()> { if fee_sat_per_vbyte <= self.max_fee_rate_sat_to_pay { return Ok(()); } - let elapsed_secs = get_job_elapsed_time(progress); + let elapsed_secs = get_job_elapsed_time(job_id); if elapsed_secs < self.fee_rate_cap_duration_secs { warn!( - "Job {} fee rate {} sat/vb exceeds cap of {} sat/vb. \ - Waiting (elapsed: {}s / max: {}s)", - progress.job_id, - fee_sat_per_vbyte, - self.max_fee_rate_sat_to_pay, - elapsed_secs, - self.fee_rate_cap_duration_secs + "Job {job_id} fee rate {fee_sat_per_vbyte} sat/vb exceeds cap of {} sat/vb. \ + Waiting (elapsed: {elapsed_secs}s / max: {}s)", + self.max_fee_rate_sat_to_pay, self.fee_rate_cap_duration_secs ); return Err(BitcoinServiceError::FeeCapExceeded { @@ -528,8 +524,8 @@ impl BitcoinService { } warn!( - "Job {} fee rate {} sat/vb exceeds cap but cap duration of {}s exceeded. Sending anyway", - progress.job_id, fee_sat_per_vbyte, self.fee_rate_cap_duration_secs + "Job {job_id} fee rate {fee_sat_per_vbyte} sat/vb exceeds cap but cap duration of {}s exceeded. Sending anyway", + self.fee_rate_cap_duration_secs ); Ok(()) @@ -604,7 +600,7 @@ impl BitcoinService { // When running in UtxoSelectionMode::Oldest, we're creating multiple utxos chain in parallel // to be able to send multiple proofs in the same block without hitting mempool policy limits. 
// To make sure there are no conflicts between parallel utxos chain, - // this additional filters out any UTXO used by queued txs and any change UTXO that are not finalized + // this additional filters out any UTXO used by in-progress job txs and any change UTXO that are not finalized UtxoSelectionMode::Oldest => { utxos.into_iter().filter(|utxo| { utxo.spendable From 3378dab1ba3b49f0b7fc61aa37ad27acadf5e387 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Fri, 14 Nov 2025 15:13:11 +0000 Subject: [PATCH 77/81] refactor: UtxoManager --- bin/citrea/tests/bitcoin/da_queue.rs | 3 +- bin/citrea/tests/bitcoin/utils.rs | 6 +- crates/bitcoin-da/src/fee.rs | 9 +- crates/bitcoin-da/src/lib.rs | 3 + crates/bitcoin-da/src/service.rs | 172 +++------------------ crates/bitcoin-da/src/test_utils.rs | 8 +- crates/bitcoin-da/src/utxo_manager.rs | 213 ++++++++++++++++++++++++++ 7 files changed, 253 insertions(+), 161 deletions(-) create mode 100644 crates/bitcoin-da/src/utxo_manager.rs diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index de01f29cf7..9da9263171 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -4,7 +4,8 @@ use alloy_primitives::{U32, U64}; use async_trait::async_trait; use bitcoin::hashes::Hash; use bitcoin_da::error::BitcoinServiceError; -use bitcoin_da::service::{BitcoinService, UtxoSelectionMode}; +use bitcoin_da::service::BitcoinService; +use bitcoin_da::utxo_manager::UtxoSelectionMode; use bitcoincore_rpc::RpcApi; use citrea_e2e::bitcoin::{BitcoinNode, DEFAULT_FINALITY_DEPTH}; use citrea_e2e::config::{BitcoinConfig, TestCaseConfig}; diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index 28bad77b28..0a8922a211 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -10,11 +10,10 @@ use anyhow::bail; use bitcoin_da::fee::FeeService; use bitcoin_da::monitoring::{MonitoringConfig, MonitoringService}; use bitcoin_da::network_constants::get_network_constants; -use bitcoin_da::service::{ - network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig, UtxoSelectionMode, -}; +use bitcoin_da::service::{network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig}; use bitcoin_da::spec::block::BitcoinBlock; use bitcoin_da::spec::RollupParams; +use bitcoin_da::utxo_manager::UtxoSelectionMode; use bitcoincore_rpc::{Auth, Client, RpcApi}; use citrea_batch_prover::rpc::BatchProverRpcClient; use citrea_e2e::bitcoin::BitcoinNode; @@ -39,7 +38,6 @@ use tokio::time::sleep; use uuid::Uuid; pub enum DaServiceKeyKind { - #[allow(dead_code)] Sequencer, BatchProver, Other(String), diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs index d1b6030d45..8f455c3ca4 100644 --- a/crates/bitcoin-da/src/fee.rs +++ b/crates/bitcoin-da/src/fee.rs @@ -17,6 +17,7 @@ use crate::error::BitcoinServiceError; use crate::monitoring::{MonitoredTx, MonitoredTxKind}; use crate::spec::utxo::UTXO; use crate::tx_signer::SignedTxPair; +use crate::utxo_manager::UtxoContext; const DEFAULT_MEMPOOL_SPACE_URL: &str = "https://mempool.space/"; const MEMPOOL_SPACE_RECOMMENDED_FEE_ENDPOINT: &str = "api/v1/fees/recommended"; @@ -261,14 +262,14 @@ pub(crate) async fn get_fee_rate_from_mempool_space( pub(crate) fn validate_txs_fee_rate( txs: &[SignedTxPair], fee_rate: u64, - utxos: Vec, - prev_utxo: Option, + utxo_context: UtxoContext, ) -> std::result::Result<(), BitcoinServiceError> { - let mut utxo_map = utxos + let mut 
utxo_map = utxo_context + .available_utxos .into_iter() .map(|utxo| ((utxo.tx_id, utxo.vout), Amount::from_sat(utxo.amount))) .collect::>(); - if let Some(prev_utxo) = prev_utxo { + if let Some(prev_utxo) = utxo_context.prev_utxo { utxo_map.insert( (prev_utxo.tx_id, prev_utxo.vout), Amount::from_sat(prev_utxo.amount), diff --git a/crates/bitcoin-da/src/lib.rs b/crates/bitcoin-da/src/lib.rs index e9c567adf5..d317c28368 100644 --- a/crates/bitcoin-da/src/lib.rs +++ b/crates/bitcoin-da/src/lib.rs @@ -69,6 +69,9 @@ pub mod fee; #[cfg(feature = "native")] pub mod rpc; +#[cfg(feature = "native")] +pub mod utxo_manager; + #[cfg(feature = "testing")] pub mod test_utils; diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 91e59d6c05..cca405eaf4 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -20,7 +20,7 @@ use bitcoin::block::Header; use bitcoin::consensus::Decodable; use bitcoin::hashes::Hash; use bitcoin::secp256k1::SecretKey; -use bitcoin::{Amount, BlockHash, CompactTarget, Transaction, Txid, Wtxid}; +use bitcoin::{BlockHash, CompactTarget, Transaction, Txid, Wtxid}; use bitcoincore_rpc::{Client, Error as BitcoinError, Error, RpcApi, RpcError}; use borsh::BorshDeserialize; use citrea_common::utils::read_env; @@ -59,10 +59,10 @@ use crate::spec::transaction::TransactionWrapper; use crate::spec::utxo::UTXO; use crate::spec::{BitcoinSpec, RollupParams}; use crate::tx_signer::{SignedTxPair, TxSigner}; +use crate::utxo_manager::{UtxoManager, UtxoSelectionMode}; use crate::verifier::{ BitcoinVerifier, MINIMUM_WITNESS_COMMITMENT_SIZE, WITNESS_COMMITMENT_PREFIX, }; -use crate::REVEAL_OUTPUT_AMOUNT; pub(crate) type Result = std::result::Result; @@ -78,23 +78,6 @@ pub fn network_to_bitcoin_network(network: &Network) -> bitcoin::Network { } } -/// Utxo selection mode. -/// How previous utxo should be chosen when tx queue is not empty -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "lowercase")] -pub enum UtxoSelectionMode { - /// Default behaviour, always use latest utxo and keep transactions chained - Chained, - /// Choose the utxo with the highest amount of confirmations - Oldest, -} - -impl Default for UtxoSelectionMode { - fn default() -> Self { - Self::Chained - } -} - /// Runtime configuration for the DA service. 
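///
/// Illustrative sketch (not from this patch) of the on-disk TOML this struct
/// could be deserialized from; `utxo_selection_mode` is the knob whose type moves
/// into `utxo_manager` here, serialized lowercase and defaulting to `chained`
/// when omitted:
///
/// ```toml
/// utxo_selection_mode = "oldest"
/// ```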
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct BitcoinServiceConfig { @@ -169,7 +152,7 @@ pub struct BitcoinService { l1_block_hash_to_height: Arc>>, tx_queue: Arc>>, pub(crate) tx_signer: TxSigner, - utxo_selection_mode: UtxoSelectionMode, + pub(crate) utxo_manager: UtxoManager, } impl BitcoinService { @@ -184,7 +167,8 @@ impl BitcoinService { da_private_key: Option, reveal_tx_prefix: Vec, tx_backup_dir: PathBuf, - utxo_selection_mode: UtxoSelectionMode, + tx_queue: Arc>>, + utxo_manager: UtxoManager, ) -> Self { Self { tx_signer: TxSigner::new(client.clone()), @@ -200,8 +184,8 @@ impl BitcoinService { l1_block_hash_to_height: Arc::new(Mutex::new(LruCache::new( NonZeroUsize::new(100).unwrap(), ))), - tx_queue: Arc::new(Mutex::new(VecDeque::new())), - utxo_selection_mode, + tx_queue, + utxo_manager, } } @@ -241,7 +225,15 @@ impl BitcoinService { .transpose() .map_err(|_| BitcoinServiceError::InvalidPrivateKey)?; - let utxo_selection_mode = config.utxo_selection_mode.clone().unwrap_or_default(); + let tx_queue = Arc::new(Mutex::new(VecDeque::new())); + let utxo_manager = UtxoManager::new( + client.clone(), + monitoring.clone(), + tx_queue.clone(), + network_constants, + config.utxo_selection_mode.clone().unwrap_or_default(), + ); + Ok(Self::new( client, network, @@ -252,7 +244,8 @@ impl BitcoinService { da_private_key, chain_params.reveal_tx_prefix, tx_backup_dir.to_path_buf(), - utxo_selection_mode, + tx_queue, + utxo_manager, )) } @@ -344,16 +337,14 @@ impl BitcoinService { ) -> Result> { let now = Instant::now(); - let prev_utxo = self.select_prev_utxo().await?; - // get all available utxos - let utxos = self.get_utxos().await?; + let utxo_context = self.utxo_manager.prepare_context().await?; let da_txs = self .create_da_transactions_with_fee_rate( tx_request, fee_sat_per_vbyte, - utxos.clone(), - prev_utxo.clone(), + utxo_context.available_utxos.clone(), + utxo_context.prev_utxo.clone(), ) .await?; let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?; @@ -362,7 +353,7 @@ impl BitcoinService { if !self.test_mempool_accept_queue_tx(&signed_txs).await? { // If it failed on mempool policy limit, it can also fail on meeting min relay fee // Stateless validation of signed txs fee - validate_txs_fee_rate(&signed_txs, fee_sat_per_vbyte, utxos, prev_utxo)?; + validate_txs_fee_rate(&signed_txs, fee_sat_per_vbyte, utxo_context)?; } // backup to file after mempool acceptance @@ -388,121 +379,6 @@ impl BitcoinService { Ok(txs) } - async fn select_prev_utxo(&self) -> Result> { - let prev_utxo = self.get_prev_utxo().await; - if self.tx_queue.lock().await.is_empty() { - return Ok(prev_utxo); - } - - match self.utxo_selection_mode { - UtxoSelectionMode::Chained => { - // Prevent UTXO conflicts when queue is not empty and running UtxoSelectionMode::Chained mode - Err(BitcoinServiceError::QueueNotEmpty) - } - UtxoSelectionMode::Oldest => Ok(if prev_utxo.is_some() { - // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain - prev_utxo - } else { - // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. - self.get_highest_confirmation_utxo().await? - }), - } - } - - /// Retrieves the most recent spendable UTXO from the transaction chain on startup. 
- #[instrument(level = "trace", skip_all, ret)] - pub(crate) async fn get_prev_utxo(&self) -> Option { - let (txid, tx) = self.monitoring.get_last_tx().await?; - - let utxos = tx.to_utxos()?; - - // Check that tx out is still spendable - // If not found, utxo is already spent - self.client.get_tx_out(&txid, 0, Some(true)).await.ok()??; - - // Return first vout - utxos.into_iter().next() - } - - #[instrument(level = "trace", skip_all, ret)] - pub(crate) async fn get_utxos(&self) -> Result> { - let utxos = self - .client - .list_unspent(Some(0), None, None, None, None) - .await?; - if utxos.is_empty() { - return Err(BitcoinServiceError::MissingUTXO); - } - - let utxos: Vec = match self.utxo_selection_mode { - UtxoSelectionMode::Chained => { - let commit_txids = self - .monitoring - .get_in_mempool_commit_transaction_ids() - .await; - - utxos - .into_iter() - .filter(|utxo| { - utxo.spendable - && utxo.solvable - // Accept either safe utxos OR unsafe commit change output that are monitored (and can be considered `mine` and thus safe) - && (utxo.safe || (commit_txids.contains(&utxo.txid) && utxo.vout == 1)) - && utxo.amount > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) - }) - .map(Into::into) - .collect() - } - - // When running in UtxoSelectionMode::Oldest, we're creating multiple utxos chain in parallel - // to be able to send multiple proofs in the same block without hitting mempool policy limits. - // To make sure there are no conflicts between parallel utxos chain, - // this additional filters out any UTXO used by queued txs and any change UTXO that are not finalized - UtxoSelectionMode::Oldest => { - let txids = self - .tx_queue - .lock() - .await - .iter() - .flat_map(|tx| { - tx.commit - .tx - .input - .iter() - .map(|input| input.previous_output.txid) - }) - .collect::>(); - - utxos.into_iter().filter(|utxo| { - utxo.spendable - && utxo.solvable - && utxo.safe - && utxo.amount > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) - // Remove utxo already in use by queued txs - && !txids.contains(&utxo.txid) - // Only keep finalized change output - && (utxo.vout == 0 || utxo.confirmations as u64 >= self.network_constants.finality_depth) - }) - .map(Into::into) - .collect() - } - }; - - if utxos.is_empty() { - return Err(BitcoinServiceError::MissingSpendableUTXO); - } - - Ok(utxos) - } - - /// Returns the UTXO with the highest number of confirmations - #[instrument(level = "trace", skip_all, ret)] - async fn get_highest_confirmation_utxo(&self) -> Result> { - let mut utxos = self.get_utxos().await?; - utxos.sort_by(|a, b| b.confirmations.cmp(&a.confirmations)); - Ok(utxos.first().cloned()) - } - #[instrument(level = "trace", skip_all, ret)] async fn get_pending_transactions(&self) -> Vec { self.monitoring @@ -573,7 +449,7 @@ impl BitcoinService { } pub(crate) async fn process_transaction_queue(&self) -> Result> { - match self.utxo_selection_mode { + match self.utxo_manager.mode { UtxoSelectionMode::Chained => self.process_transaction_queue_chained().await, UtxoSelectionMode::Oldest => self.process_transaction_queue_oldest_mode().await, } @@ -754,7 +630,7 @@ impl BitcoinService { return Err(BitcoinServiceError::WrongStatusForBumping(tx.status)); }; - let Some(utxo) = self.get_prev_utxo().await else { + let Some(utxo) = self.utxo_manager.get_prev_utxo().await else { return Err(BitcoinServiceError::MissingPreviousUTXO); }; diff --git a/crates/bitcoin-da/src/test_utils.rs b/crates/bitcoin-da/src/test_utils.rs index f74dd3db76..6bbe0fc408 100644 --- a/crates/bitcoin-da/src/test_utils.rs +++ 
b/crates/bitcoin-da/src/test_utils.rs @@ -33,7 +33,7 @@ impl BitcoinService { RawTxData::Chunks(chunks) => { for body in chunks { // get all available utxos that are not already spent - let utxos = self.get_utxos().await?; + let utxos = self.utxo_manager.get_available_utxos().await?; let utxos = utxos .into_iter() .filter(|utxo| { @@ -43,7 +43,7 @@ impl BitcoinService { }) .collect::>(); - let prev_utxo = self.get_prev_utxo().await; + let prev_utxo = self.utxo_manager.get_prev_utxo().await; // get address from a utxo let address = utxos[0] @@ -92,12 +92,12 @@ impl BitcoinService { borsh::to_vec(&aggregate).expect("Aggregate serialize must not fail"); // get all available utxos that are not already spent - let utxos = self.get_utxos().await?; + let utxos = self.utxo_manager.get_available_utxos().await?; let utxos = utxos .into_iter() .filter(|utxo| utxo.amount >= 50 * 10_u64.pow(8)) .collect::>(); - let prev_utxo = self.get_prev_utxo().await; + let prev_utxo = self.utxo_manager.get_prev_utxo().await; // get address from a utxo let address = utxos[0] diff --git a/crates/bitcoin-da/src/utxo_manager.rs b/crates/bitcoin-da/src/utxo_manager.rs new file mode 100644 index 0000000000..a351501b3e --- /dev/null +++ b/crates/bitcoin-da/src/utxo_manager.rs @@ -0,0 +1,213 @@ +//! UTXO management for Bitcoin DA service. +//! +//! Handles UTXO selection and filtering. Supports two modes: +//! - Chained: Sequential transaction chains +//! - Oldest: Parallel chains using most-confirmed UTXOs + +use std::collections::VecDeque; +use std::sync::Arc; + +use bitcoin::Amount; +use bitcoincore_rpc::json::ListUnspentResultEntry; +use bitcoincore_rpc::{Client, RpcApi}; +use serde::{Deserialize, Serialize}; +use tokio::sync::Mutex; + +use crate::error::BitcoinServiceError; +use crate::monitoring::MonitoringService; +use crate::network_constants::NetworkConstants; +use crate::service::Result; +use crate::spec::utxo::UTXO; +use crate::tx_signer::SignedTxPair; +use crate::REVEAL_OUTPUT_AMOUNT; + +/// UTXO selection strategy when queue has pending transactions. +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum UtxoSelectionMode { + /// Default behaviour, always use latest UTXO and keep transactions chained + /// Maintain a single sequential transaction chain. + Chained, + /// Choose the UTXO with the highest amount of confirmations and run parallel UTXO chains + Oldest, +} + +impl Default for UtxoSelectionMode { + fn default() -> Self { + Self::Chained + } +} + +/// UTXOs needed to build a transaction. +pub struct UtxoContext { + /// Filtered UTXOs + pub available_utxos: Vec, + /// UTXO to chain from + pub prev_utxo: Option, +} + +/// Manages UTXO selection and filtering +/// +/// Queries available UTXOs via bitcoin RPC, filters based on mode and queue state, +/// and ensures available UTXOs don't conflict with queued transactions. +#[derive(Debug)] +pub(crate) struct UtxoManager { + client: Arc, + monitoring: Arc, + tx_queue: Arc>>, + network_constants: NetworkConstants, + pub mode: UtxoSelectionMode, +} + +impl UtxoManager { + pub fn new( + client: Arc, + monitoring: Arc, + tx_queue: Arc>>, + network_constants: NetworkConstants, + mode: UtxoSelectionMode, + ) -> Self { + Self { + client, + monitoring, + network_constants, + mode, + tx_queue, + } + } + + /// Returns filtered UTXOs and `prev_utxo`. 
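+    ///
+    /// Minimal call-site sketch (hypothetical; `build_da_txs` stands in for the
+    /// transaction-building step that consumes the context):
+    ///
+    /// ```ignore
+    /// let ctx = utxo_manager.prepare_context().await?;
+    /// let txs = build_da_txs(ctx.available_utxos, ctx.prev_utxo)?;
+    /// ```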
+ pub async fn prepare_context(&self) -> Result { + let available_utxos = self.get_available_utxos().await?; + let prev_utxo = self.select_prev_utxo(&available_utxos).await?; + + Ok(UtxoContext { + available_utxos, + prev_utxo, + }) + } + + /// Selects `prev_utxo` to use as first input in subsequent transaction. + /// + /// If queue is empty: uses latest monitored UTXO, that is the latest transaction in current UTXO chain. + /// If queue has pending txs: + /// - Chained mode: returns Err(BitcoinServiceError::QueueNotEmpty) + /// - Oldest mode: uses UTXO with highest number of confirmation to start new chain + pub(crate) async fn select_prev_utxo(&self, available_utxos: &[UTXO]) -> Result> { + let prev_utxo = self.get_prev_utxo().await; + if self.tx_queue.lock().await.is_empty() { + return Ok(prev_utxo); + } + + match self.mode { + UtxoSelectionMode::Chained => { + // Prevent UTXO conflicts when queue is not empty and running UtxoSelectionMode::Chained mode + Err(BitcoinServiceError::QueueNotEmpty) + } + UtxoSelectionMode::Oldest => Ok(if prev_utxo.is_some() { + // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain + prev_utxo + } else { + // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. + // Get UTXO with most confirmations to start new chain + available_utxos + .iter() + .max_by_key(|utxo| utxo.confirmations) + .cloned() + }), + } + } + + /// Retrieves the most recent spendable UTXO from the transaction chain. + pub(crate) async fn get_prev_utxo(&self) -> Option { + let (txid, tx) = self.monitoring.get_last_tx().await?; + + let utxos = tx.to_utxos()?; + + // Check that tx out is still spendable + // If not found, utxo is already spent + self.client.get_tx_out(&txid, 0, Some(true)).await.ok()??; + + // Return first vout + utxos.into_iter().next() + } + + /// Gets available UTXOs from `list_unspent` RPC, and filter by mode. + pub(crate) async fn get_available_utxos(&self) -> Result> { + let utxos = self + .client + .list_unspent(Some(0), None, None, None, None) + .await?; + if utxos.is_empty() { + return Err(BitcoinServiceError::MissingUTXO); + } + + let filtered_utxos = match self.mode { + UtxoSelectionMode::Chained => self.chained_mode_filter(utxos).await, + UtxoSelectionMode::Oldest => self.oldest_mode_filter(utxos).await, + }; + + if filtered_utxos.is_empty() { + return Err(BitcoinServiceError::MissingSpendableUTXO); + } + + Ok(filtered_utxos) + } + + /// Filters UTXOs for Chained mode. + async fn chained_mode_filter(&self, utxos: Vec) -> Vec { + let commit_txids = self + .monitoring + .get_in_mempool_commit_transaction_ids() + .await; + + utxos + .into_iter() + .filter(|utxo| { + utxo.spendable + && utxo.solvable + // Accept either safe utxos OR unsafe commit change output that are monitored (and can be considered `mine` and thus safe) + && (utxo.safe || (commit_txids.contains(&utxo.txid) && utxo.vout == 1)) + && utxo.amount > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) + }) + .map(Into::into) + .collect() + } + + /// Filters UTXOs for Oldest mode. + async fn oldest_mode_filter(&self, utxos: Vec) -> Vec { + let txids = self + .tx_queue + .lock() + .await + .iter() + .flat_map(|tx| { + tx.commit + .tx + .input + .iter() + .map(|input| input.previous_output.txid) + }) + .collect::>(); + + // When running in UtxoSelectionMode::Oldest, we're creating multiple utxos chain in parallel + // to be able to send multiple proofs in the same block without hitting mempool policy limits. 
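+        // (Illustrative numbers, not from this patch: with a finality_depth of 8,
+        // a change output at vout 1 needs 8 confirmations before it may seed a new
+        // chain, while reveal outputs at vout 0 stay spendable immediately.)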
+ // To make sure there are no conflicts between parallel utxos chain, + // this additional filters out any UTXO used by queued txs and any change UTXO that are not finalized + utxos + .into_iter() + .filter(|utxo| { + utxo.spendable + && utxo.solvable + && utxo.safe + && utxo.amount > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) + // Remove utxo already in use by queued txs + && !txids.contains(&utxo.txid) + // Only keep finalized change output + && (utxo.vout == 0 + || utxo.confirmations as u64 >= self.network_constants.finality_depth) + }) + .map(Into::into) + .collect() + } +} From 84a1e477c15ab311b49c53b619236822d8d63d74 Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Wed, 10 Dec 2025 09:20:54 +0000 Subject: [PATCH 78/81] Use parking_lot --- Cargo.lock | 1 + crates/bitcoin-da/Cargo.toml | 16 +++++++++------- crates/bitcoin-da/src/job/service.rs | 12 ++++++------ 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 630804e4a7..2d48d9c508 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2862,6 +2862,7 @@ dependencies = [ "lru 0.13.0", "metrics", "metrics-derive", + "parking_lot", "rand 0.8.5", "reqwest", "reth-tasks", diff --git a/crates/bitcoin-da/Cargo.toml b/crates/bitcoin-da/Cargo.toml index c54437e689..064ed5a9b1 100644 --- a/crates/bitcoin-da/Cargo.toml +++ b/crates/bitcoin-da/Cargo.toml @@ -31,6 +31,7 @@ k256 = { workspace = true } lru = { workspace = true, optional = true } metrics = { workspace = true, optional = true } metrics-derive = { workspace = true, optional = true } +parking_lot = { workspace = true, optional = true } reqwest = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } secp256k1 = { version = "0.29", optional = true, features = ["rand-std", "std", "global-context"] } @@ -53,22 +54,23 @@ native = [ "dep:async-trait", "dep:backoff", "dep:bincode", + "dep:bitcoincore-rpc", + "dep:citrea-common", "dep:futures", + "dep:jsonrpsee", "dep:lru", - "dep:tokio", "dep:metrics", "dep:metrics-derive", + "dep:parking_lot", "dep:reth-tasks", - "dep:tracing", - "dep:serde_json", - "sov-rollup-interface/native", - "dep:citrea-common", - "dep:bitcoincore-rpc", "dep:reqwest", - "dep:jsonrpsee", "dep:secp256k1", + "dep:serde_json", "dep:sov-db", + "dep:tokio", + "dep:tracing", "dep:uuid", + "sov-rollup-interface/native", ] testing = [] diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs index 4156306448..d2cf76c849 100644 --- a/crates/bitcoin-da/src/job/service.rs +++ b/crates/bitcoin-da/src/job/service.rs @@ -1,11 +1,12 @@ use std::collections::{HashMap, HashSet}; use std::num::NonZeroUsize; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use anyhow::Context; use bitcoin::hashes::Hash; use bitcoin::Txid; use lru::LruCache; +use parking_lot::Mutex; use sov_db::ledger_db::DaLedgerOps; use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress}; use sov_rollup_interface::da::DataOnDa; @@ -60,7 +61,7 @@ impl DaJobService { METRICS.record_job_submitted(); - self.job_waiters.lock().unwrap().insert(job_id, tx); + self.job_waiters.lock().insert(job_id, tx); info!("Job {job_id} submitted and persisted"); Ok(job_id) @@ -100,7 +101,7 @@ impl DaJobService { /// * `Result` - The raw transaction data or an error #[instrument(level = "trace", skip(self), ret)] pub(crate) fn get_job_data(&self, job_id: Uuid, job_data: DaTxRequest) -> Result { - if let Some(data) = self.raw_tx_data_cache.lock().unwrap().get(&job_id) { + if let Some(data) = 
             return Ok(data.to_owned());
         }

@@ -126,7 +127,6 @@ impl DaJobService {

         self.raw_tx_data_cache
             .lock()
-            .unwrap()
             .push(job_id, raw_tx_data.clone());

         Ok(raw_tx_data)
@@ -235,7 +235,7 @@ impl DaJobService {
             DaJobStatus::Pending | DaJobStatus::InProgress => return,
         };

-        if let Some(tx) = self.job_waiters.lock().unwrap().remove(&job_id) {
+        if let Some(tx) = self.job_waiters.lock().remove(&job_id) {
             let _ = tx.send(result);
         }
     }
@@ -245,7 +245,7 @@ impl DaJobService {
         job_id: JobId,
         waiter: oneshot::Sender>,
     ) {
-        self.job_waiters.lock().unwrap().insert(job_id, waiter);
+        self.job_waiters.lock().insert(job_id, waiter);
     }

     pub(crate) fn recover_job(

From 884500c71af326ec9e2c531cfad38a9c98fb9f48 Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 10 Dec 2025 12:52:43 +0000
Subject: [PATCH 79/81] Remove dead code

---
 crates/bitcoin-da/src/job/service.rs | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs
index d2cf76c849..61db996be5 100644
--- a/crates/bitcoin-da/src/job/service.rs
+++ b/crates/bitcoin-da/src/job/service.rs
@@ -210,15 +210,6 @@ impl DaJobService {
         Ok(txids)
     }

-    /// Check if any job is in progress.
-    pub async fn has_job_in_progress(&self) -> Result<bool> {
-        let in_progress_jobs = self
-            .ledger_db
-            .get_job_ids_by_status(DaJobStatus::InProgress.as_u8())?;
-
-        Ok(!in_progress_jobs.is_empty())
-    }
-
     fn notify_new_status(&self, job_id: JobId, progress: &JobProgress) {
         let result = match &progress.status {
             DaJobStatus::Completed => {

From dbc3cb6e8977848d3bb786428724d424ee024f6f Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Wed, 10 Dec 2025 13:37:34 +0000
Subject: [PATCH 80/81] Renames

---
 crates/bitcoin-da/src/job/service.rs                          | 2 +-
 crates/bitcoin-da/src/service.rs                              | 4 ++--
 crates/prover-services/src/parallel.rs                        | 2 +-
 crates/sovereign-sdk/adapters/mock-da/src/service.rs          | 2 +-
 crates/sovereign-sdk/rollup-interface/src/node/services/da.rs | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs
index 61db996be5..8369ea2052 100644
--- a/crates/bitcoin-da/src/job/service.rs
+++ b/crates/bitcoin-da/src/job/service.rs
@@ -239,7 +239,7 @@ impl DaJobService {
         self.job_waiters.lock().insert(job_id, waiter);
     }

-    pub(crate) fn recover_job(
+    pub(crate) fn recover_job_waiter(
         &self,
         job_id: Uuid,
     ) -> Result>> {
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index 2df7456630..843070c023 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -1415,14 +1415,14 @@ impl DaService for BitcoinService {
         Ok((job_id, rx))
     }

-    async fn recover_existing_job(
+    async fn recover_existing_job_waiter(
         &self,
         job_id: Uuid,
     ) -> Result>> {
         self.job_service
             .lock()
             .await
-            .recover_job(job_id)
+            .recover_job_waiter(job_id)
             .map_err(Into::into)
     }

diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs
index a11aa7526b..e522cc6bcf 100644
--- a/crates/prover-services/src/parallel.rs
+++ b/crates/prover-services/src/parallel.rs
@@ -254,7 +254,7 @@ where
         &self,
         da_job_id: Uuid,
     ) -> Result, ::Error> {
-        self.da_service.recover_existing_job(da_job_id).await
+        self.da_service.recover_existing_job_waiter(da_job_id).await
     }
 }

diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs
index 8de414623f..7e3fef181b 100644
--- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs
+++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs
@@ -520,7 +520,7 @@ impl DaService for MockDaService {
         }
     }

-    async fn recover_existing_job(
+    async fn recover_existing_job_waiter(
        &self,
        _job_id: Uuid,
    ) -> Result>, Self::Error> {
diff --git a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs
index d4259b95b5..d95a2969e5 100644
--- a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs
+++ b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs
@@ -134,7 +134,7 @@ pub trait DaService: Send + Sync + 'static {

     /// Recover an ongoing da job sending session
     /// Returns the receiver if available
-    async fn recover_existing_job(
+    async fn recover_existing_job_waiter(
         &self,
         job_id: Uuid,
     ) -> Result>, Self::Error>;

From 5c5d0e847394ee12a2ea1b2b585631e08990fd9b Mon Sep 17 00:00:00 2001
From: jfldde <168934971+jfldde@users.noreply.github.com>
Date: Tue, 16 Dec 2025 10:16:29 +0000
Subject: [PATCH 81/81] Use UtxoContext consistently

---
 bin/citrea/tests/bitcoin/light_client_test.rs | 13 +++--
 bin/citrea/tests/bitcoin/taproot_key_spend.rs |  7 ++-
 .../src/helpers/builders/body_builders.rs     | 49 ++++++++++++-------
 .../bitcoin-da/src/helpers/builders/tests.rs  |  7 ++-
 crates/bitcoin-da/src/service.rs              | 14 ++----
 crates/bitcoin-da/src/utxo_manager.rs         |  1 +
 6 files changed, 56 insertions(+), 35 deletions(-)

diff --git a/bin/citrea/tests/bitcoin/light_client_test.rs b/bin/citrea/tests/bitcoin/light_client_test.rs
index e0e795cfce..4071cdac16 100644
--- a/bin/citrea/tests/bitcoin/light_client_test.rs
+++ b/bin/citrea/tests/bitcoin/light_client_test.rs
@@ -8,6 +8,7 @@ use bitcoin::hashes::Hash;
 use bitcoin::Txid;
 use bitcoin_da::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed};
 use bitcoin_da::spec::RollupParams;
+use bitcoin_da::utxo_manager::UtxoContext;
 use bitcoin_da::verifier::BitcoinVerifier;
 use bitcoincore_rpc::{Client, RpcApi};
 use borsh::BorshDeserialize;
@@ -3281,8 +3282,10 @@ impl UndecompressableBlobTest {
         let DaTxs::Complete { commit, reveal } = create_inscription_type_0(
             body,
             &da_private_key,
-            None,
-            utxos,
+            UtxoContext {
+                prev_utxo: None,
+                available_utxos: utxos,
+            },
             change_address,
             1,
             1,
@@ -3339,8 +3342,10 @@ impl UndecompressableBlobTest {
         } = create_inscription_type_1(
             chunks,
             &da_private_key,
-            None,
-            utxos,
+            UtxoContext {
+                available_utxos: utxos,
+                prev_utxo: None,
+            },
             change_address,
             2,
             2,
diff --git a/bin/citrea/tests/bitcoin/taproot_key_spend.rs b/bin/citrea/tests/bitcoin/taproot_key_spend.rs
index 7c5b7cbc57..c82d06bc26 100644
--- a/bin/citrea/tests/bitcoin/taproot_key_spend.rs
+++ b/bin/citrea/tests/bitcoin/taproot_key_spend.rs
@@ -11,6 +11,7 @@ use bitcoin::{
 };
 use bitcoin_da::helpers::builders::body_builders::{create_inscription_type_0, DaTxs};
 use bitcoin_da::spec::utxo::UTXO;
+use bitcoin_da::utxo_manager::UtxoContext;
 use bitcoincore_rpc::RpcApi;
 use citrea_e2e::config::{BitcoinConfig, TestCaseConfig};
 use citrea_e2e::framework::TestFramework;
@@ -162,8 +163,10 @@ impl TestCase for TaprootKeySpendTest {
         let inscription_txs = create_inscription_type_0(
             test_data.to_vec(),
             &private_key,
-            None,
-            vec![utxo.clone()],
+            UtxoContext {
+                prev_utxo: None,
+                available_utxos: vec![utxo.clone()],
+            },
             change_address.clone(),
             10, // commit fee rate
             10, // reveal fee rate
diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs
index 8e1e15b7de..65b9335bf7 100644
--- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs
+++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs
@@ -23,6 +23,7 @@ use super::{
     get_size_reveal, sign_blob_with_private_key, update_witness, TransactionKind, TxWithId,
 };
 use crate::spec::utxo::UTXO;
+use crate::utxo_manager::UtxoContext;
 use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD};

 /// These are real blobs we put on DA.
@@ -85,8 +86,7 @@ pub enum DaTxs {
 pub fn create_inscription_transactions(
     data: RawTxData,
     da_private_key: SecretKey,
-    prev_utxo: Option<UTXO>,
-    utxos: Vec<UTXO>,
+    utxo_context: UtxoContext,
     change_address: Address,
     commit_fee_rate: u64,
     reveal_fee_rate: u64,
@@ -97,8 +97,7 @@ pub fn create_inscription_transactions(
         RawTxData::Complete(body) => create_inscription_type_0(
             body,
             &da_private_key,
-            prev_utxo,
-            utxos,
+            utxo_context,
             change_address,
             commit_fee_rate,
             reveal_fee_rate,
@@ -108,8 +107,7 @@ pub fn create_inscription_transactions(
         RawTxData::Chunks(body) => create_inscription_type_1(
             body,
             &da_private_key,
-            prev_utxo,
-            utxos,
+            utxo_context,
             change_address,
             commit_fee_rate,
             reveal_fee_rate,
@@ -119,8 +117,7 @@ pub fn create_inscription_transactions(
         RawTxData::BatchProofMethodId(body) => create_inscription_type_3(
             body,
             &da_private_key,
-            prev_utxo,
-            utxos,
+            utxo_context,
             change_address,
             commit_fee_rate,
             reveal_fee_rate,
@@ -130,8 +127,7 @@ pub fn create_inscription_transactions(
         RawTxData::SequencerCommitment(body) => create_inscription_type_4(
             body,
             &da_private_key,
-            prev_utxo,
-            utxos,
+            utxo_context,
             change_address,
             commit_fee_rate,
             reveal_fee_rate,
@@ -147,14 +143,18 @@ pub fn create_inscription_transactions(
 pub fn create_inscription_type_0(
     body: Vec<u8>,
     da_private_key: &SecretKey,
-    prev_utxo: Option<UTXO>,
-    utxos: Vec<UTXO>,
+    utxo_context: UtxoContext,
     change_address: Address,
     commit_fee_rate: u64,
     reveal_fee_rate: u64,
     network: Network,
     reveal_tx_prefix: &[u8],
 ) -> Result<DaTxs> {
+    let UtxoContext {
+        available_utxos: utxos,
+        prev_utxo,
+    } = utxo_context;
+
     // Create reveal key
     let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key);
     let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair);
@@ -319,14 +319,18 @@ pub fn create_inscription_type_0(
 pub fn create_inscription_type_1(
     chunks: Vec<Vec<u8>>,
     da_private_key: &SecretKey,
-    mut prev_utxo: Option<UTXO>,
-    mut utxos: Vec<UTXO>,
+    utxo_context: UtxoContext,
     change_address: Address,
     commit_fee_rate: u64,
     reveal_fee_rate: u64,
     network: Network,
     reveal_tx_prefix: &[u8],
 ) -> Result<DaTxs> {
+    let UtxoContext {
+        available_utxos: mut utxos,
+        mut prev_utxo,
+    } = utxo_context;
+
     // Create reveal key
     let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key);
     let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair);
@@ -679,14 +683,18 @@ pub fn create_inscription_type_1(
 pub fn create_inscription_type_3(
     body: Vec<u8>,
     da_private_key: &SecretKey,
-    prev_utxo: Option<UTXO>,
-    utxos: Vec<UTXO>,
+    utxo_context: UtxoContext,
     change_address: Address,
     commit_fee_rate: u64,
     reveal_fee_rate: u64,
     network: Network,
     reveal_tx_prefix: &[u8],
 ) -> Result<DaTxs> {
+    let UtxoContext {
+        available_utxos: utxos,
+        prev_utxo,
+    } = utxo_context;
+
     // Create reveal key
     let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key);
     let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair);
@@ -851,8 +859,7 @@ pub fn create_inscription_type_3(
 pub fn create_inscription_type_4(
    body: Vec<u8>,
     da_private_key: &SecretKey,
-    prev_utxo: Option<UTXO>,
-    utxos: Vec<UTXO>,
+    utxo_context: UtxoContext,
     change_address: Address,
     commit_fee_rate: u64,
     reveal_fee_rate: u64,
@@ -863,6 +870,12 @@ pub fn create_inscription_type_4(
         body.len() < 520,
         "The body of a serialized sequencer commitment exceeds 520 bytes"
     );
+
+    let UtxoContext {
+        available_utxos: utxos,
+        prev_utxo,
+    } = utxo_context;
+
     // Create reveal key
     let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key);
     let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair);
diff --git a/crates/bitcoin-da/src/helpers/builders/tests.rs b/crates/bitcoin-da/src/helpers/builders/tests.rs
index ad9963b3a9..7daad243bb 100644
--- a/crates/bitcoin-da/src/helpers/builders/tests.rs
+++ b/crates/bitcoin-da/src/helpers/builders/tests.rs
@@ -12,6 +12,7 @@ use super::body_builders::{DaTxs, RawTxData};
 use crate::helpers::builders::sign_blob_with_private_key;
 use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction};
 use crate::spec::utxo::UTXO;
+use crate::utxo_manager::UtxoContext;
 use crate::REVEAL_OUTPUT_AMOUNT;

 #[test]
@@ -510,8 +511,10 @@ fn create_inscription_transactions() {
     let DaTxs::Complete { commit, reveal } = super::body_builders::create_inscription_transactions(
         RawTxData::Complete(body.clone()),
         da_private_key,
-        None,
-        utxos.clone(),
+        UtxoContext {
+            prev_utxo: None,
+            available_utxos: utxos.clone(),
+        },
         address.clone(),
         12,
         10,
diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs
index cca405eaf4..91afeccde6 100644
--- a/crates/bitcoin-da/src/service.rs
+++ b/crates/bitcoin-da/src/service.rs
@@ -56,10 +56,9 @@ use crate::spec::header::HeaderWrapper;
 use crate::spec::proof::InclusionMultiProof;
 use crate::spec::short_proof::BitcoinHeaderShortProof;
 use crate::spec::transaction::TransactionWrapper;
-use crate::spec::utxo::UTXO;
 use crate::spec::{BitcoinSpec, RollupParams};
 use crate::tx_signer::{SignedTxPair, TxSigner};
-use crate::utxo_manager::{UtxoManager, UtxoSelectionMode};
+use crate::utxo_manager::{UtxoContext, UtxoManager, UtxoSelectionMode};
 use crate::verifier::{
     BitcoinVerifier, MINIMUM_WITNESS_COMMITMENT_SIZE, WITNESS_COMMITMENT_PREFIX,
 };
@@ -343,8 +342,7 @@ impl BitcoinService {
             .create_da_transactions_with_fee_rate(
                 tx_request,
                 fee_sat_per_vbyte,
-                utxo_context.available_utxos.clone(),
-                utxo_context.prev_utxo.clone(),
+                utxo_context.clone(),
             )
             .await?;
         let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?;
@@ -396,8 +394,7 @@ impl BitcoinService {
         &self,
         tx_request: DaTxRequest,
         fee_sat_per_vbyte: u64,
-        utxos: Vec<UTXO>,
-        prev_utxo: Option<UTXO>,
+        utxo_context: UtxoContext,
     ) -> Result<DaTxs> {
         let data = match tx_request {
             DaTxRequest::ZKProof(zkproof) => split_proof(zkproof)?,
@@ -416,7 +413,7 @@ impl BitcoinService {
         let network = self.network;
         let da_private_key = self.da_private_key.expect("No private key set");
         // get address from a utxo
-        let address = utxos[0]
+        let address = utxo_context.available_utxos[0]
             .address
             .clone()
             .ok_or(BitcoinServiceError::MissingAddress)?
@@ -429,8 +426,7 @@ impl BitcoinService {
         create_inscription_transactions(
             data,
             da_private_key,
-            prev_utxo,
-            utxos,
+            utxo_context,
             address,
             fee_sat_per_vbyte,
             fee_sat_per_vbyte,
diff --git a/crates/bitcoin-da/src/utxo_manager.rs b/crates/bitcoin-da/src/utxo_manager.rs
index a351501b3e..aa33d181b1 100644
--- a/crates/bitcoin-da/src/utxo_manager.rs
+++ b/crates/bitcoin-da/src/utxo_manager.rs
@@ -38,6 +38,7 @@ impl Default for UtxoSelectionMode {
     }
 }

+#[derive(Debug, Clone)]
 /// UTXOs needed to build a transaction.
 pub struct UtxoContext {
     /// Filtered UTXOs
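
Taken together, the last four patches leave two conventions at every call site: locks on `job_waiters` and `raw_tx_data_cache` drop the `.unwrap()` because `parking_lot::Mutex::lock()` returns the guard directly rather than a poisoning `Result`, and the inscription builders take a single `UtxoContext` in place of the old `(prev_utxo, utxos)` pair. Below is a minimal sketch of a sender path after PATCH 81, assuming only the `UtxoManager` and builder APIs shown in the diffs above; the bindings `utxo_manager`, `blob`, `change_address`, `network`, `reveal_tx_prefix` and the flat 10 sat/vb fee rates are illustrative, not part of the patches:

    // Illustrative sketch only; assumes the UtxoManager and builder APIs above.
    // Build the UTXO context once: filtered spendable UTXOs plus the previous
    // UTXO of the current chain, if any.
    let utxo_context: UtxoContext = utxo_manager.prepare_context().await?;

    // Pass the whole context down; each builder destructures it into
    // `available_utxos` and `prev_utxo` itself (see body_builders.rs above).
    let da_txs = create_inscription_transactions(
        RawTxData::Complete(blob), // hypothetical blob: Vec<u8>
        da_private_key,
        utxo_context,
        change_address,
        10, // commit fee rate (sat/vb)
        10, // reveal fee rate (sat/vb)
        network,
        reveal_tx_prefix,
    )?;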