diff --git a/Cargo.lock b/Cargo.lock index 28e0c50755..27cfcd983d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2847,6 +2847,7 @@ dependencies = [ "anyhow", "async-trait", "backoff", + "bincode", "bitcoin", "bitcoincore-rpc", "borsh", @@ -2861,6 +2862,7 @@ dependencies = [ "lru 0.13.0", "metrics", "metrics-derive", + "parking_lot", "rand 0.8.5", "reqwest", "reth-tasks", @@ -2868,10 +2870,12 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", + "sov-db", "sov-rollup-interface", "thiserror 2.0.12", "tokio", "tracing", + "uuid", ] [[package]] @@ -12119,10 +12123,12 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", + "sov-db", "sov-rollup-interface", "tempfile", "tokio", "tracing", + "uuid", ] [[package]] diff --git a/bin/citrea/src/main.rs b/bin/citrea/src/main.rs index 842669ab8a..c98d9c3d7f 100644 --- a/bin/citrea/src/main.rs +++ b/bin/citrea/src/main.rs @@ -203,6 +203,7 @@ where matches!(node_type, NodeWithConfig::Sequencer(_)) || matches!(node_type, NodeWithConfig::BatchProver(_)), network, + ledger_db.clone(), ) .await?; diff --git a/bin/citrea/src/rollup/bitcoin.rs b/bin/citrea/src/rollup/bitcoin.rs index 65df3f1f12..70afcc58b3 100644 --- a/bin/citrea/src/rollup/bitcoin.rs +++ b/bin/citrea/src/rollup/bitcoin.rs @@ -4,12 +4,11 @@ use std::time::Duration; use async_trait::async_trait; use bitcoin_da::fee::FeeService; +use bitcoin_da::job::rpc::create_rpc_module as create_da_job_rpc_module; use bitcoin_da::monitoring::MonitoringService; use bitcoin_da::network_constants::get_network_constants; use bitcoin_da::rpc::create_rpc_module as create_da_rpc_module; -use bitcoin_da::service::{ - network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig, TxidWrapper, -}; +use bitcoin_da::service::{network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig}; use bitcoin_da::spec::{BitcoinSpec, RollupParams}; use bitcoin_da::verifier::BitcoinVerifier; use bitcoincore_rpc::{Auth, Client}; @@ -30,9 +29,7 @@ use 
sov_modules_api::default_context::DefaultContext; use sov_modules_api::{Address, SpecId, Zkvm}; use sov_modules_rollup_blueprint::RollupBlueprint; use sov_prover_storage_manager::ProverStorageManager; -use sov_rollup_interface::services::da::TxRequestWithNotifier; use sov_state::ProverStorage; -use tokio::sync::mpsc::unbounded_channel; use tracing::instrument; use crate::guests::{ @@ -85,6 +82,9 @@ impl RollupBlueprint for BitcoinRollup { let da_methods = create_da_rpc_module(da_service.clone()); rpc_methods.merge(da_methods)?; + let da_methods = create_da_job_rpc_module(da_service.clone()); + rpc_methods.merge(da_methods)?; + Ok(rpc_methods) } @@ -107,9 +107,8 @@ impl RollupBlueprint for BitcoinRollup { require_wallet_check: bool, task_executor: TaskExecutor, network: Network, + ledger_db: LedgerDB, ) -> Result, anyhow::Error> { - let (tx, rx) = unbounded_channel::>(); - let chain_params = RollupParams { reveal_tx_prefix: REVEAL_TX_PREFIX.to_vec(), network, @@ -155,7 +154,7 @@ impl RollupBlueprint for BitcoinRollup { monitoring_service, fee_service, require_wallet_check, - tx, + ledger_db, ) .await?, ); @@ -167,7 +166,7 @@ impl RollupBlueprint for BitcoinRollup { service.monitoring.restore().await?; task_executor.spawn_with_graceful_shutdown_signal(|tk| { - Arc::clone(&service).run_da_queue(rx, block_rx, tk) + Arc::clone(&service).run_da_queue(block_rx, tk) }); task_executor .spawn_with_graceful_shutdown_signal(|tk| Arc::clone(&service.monitoring).run(tk)); diff --git a/bin/citrea/src/rollup/mock.rs b/bin/citrea/src/rollup/mock.rs index 3ecca2b584..d0668c5a47 100644 --- a/bin/citrea/src/rollup/mock.rs +++ b/bin/citrea/src/rollup/mock.rs @@ -71,10 +71,12 @@ impl RollupBlueprint for MockDemoRollup { _require_wallet_check: bool, _task_manager: TaskExecutor, _network: Network, + ledger_db: LedgerDB, ) -> Result, anyhow::Error> { - Ok(Arc::new(MockDaService::new( + Ok(Arc::new(MockDaService::new_with_ledger_db( rollup_config.da.sender_address.clone(), 
&rollup_config.da.db_path, + ledger_db, ))) } diff --git a/bin/citrea/src/rollup/mod.rs b/bin/citrea/src/rollup/mod.rs index 74dd385600..218eb47a0b 100644 --- a/bin/citrea/src/rollup/mod.rs +++ b/bin/citrea/src/rollup/mod.rs @@ -77,6 +77,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { rollup_config: &FullNodeConfig, require_da_wallet: bool, network: Network, + ledger_db: LedgerDB, ) -> Result> { let task_manager = TaskManager::current(); let da_service = self @@ -85,6 +86,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { require_da_wallet, task_manager.executor(), network, + ledger_db, ) .await?; let (l2_block_tx, l2_block_rx) = broadcast::channel(10); diff --git a/bin/citrea/tests/bitcoin/batch_prover_test.rs b/bin/citrea/tests/bitcoin/batch_prover_test.rs index 1dd7d8d95f..45fe485c16 100644 --- a/bin/citrea/tests/bitcoin/batch_prover_test.rs +++ b/bin/citrea/tests/bitcoin/batch_prover_test.rs @@ -313,7 +313,7 @@ async fn basic_prover_test() -> Result<()> { // // Send the same commitment that was already proven. 
// bitcoin_da_service -// .send_transaction_with_fee_rate( +// .send_transaction_and_wait( // DaTxRequest::SequencerCommitment(commitments.first().unwrap().clone()), // 1, // ) @@ -1561,6 +1561,12 @@ impl TestCase for RetryProvingTest { .unwrap(); assert_eq!(proving_job.commitments.len(), 4); + let da_job_id = batch_prover + .client + .http_client() + .get_da_job_id_by_job_id(proving_job.id) + .await?; + // retry proving the same job let new_job_id = batch_prover .client @@ -1569,6 +1575,18 @@ impl TestCase for RetryProvingTest { .await?; assert_ne!(new_job_id, proving_job.id, "new job id should be different"); + wait_for_prover_job(batch_prover, new_job_id, None).await?; + + let retried_da_job_id = batch_prover + .client + .http_client() + .get_da_job_id_by_job_id(new_job_id) + .await?; + assert_ne!( + da_job_id, retried_da_job_id, + "new da job id should be different" + ); + // check the commitments of the new proving job let new_proving_job = wait_for_prover_job(batch_prover, new_job_id, None).await?; assert_eq!(new_proving_job.commitments.len(), 4); diff --git a/bin/citrea/tests/bitcoin/bitcoin_service.rs b/bin/citrea/tests/bitcoin/bitcoin_service.rs index 00f35b8462..d40f2502fc 100644 --- a/bin/citrea/tests/bitcoin/bitcoin_service.rs +++ b/bin/citrea/tests/bitcoin/bitcoin_service.rs @@ -162,7 +162,7 @@ impl TestCase for BitcoinServiceTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_bitcoin_service() -> Result<()> { TestCaseRunner::new(BitcoinServiceTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/bitcoin_test.rs b/bin/citrea/tests/bitcoin/bitcoin_test.rs index 8e5a1896ee..a2b33223fd 100644 --- a/bin/citrea/tests/bitcoin/bitcoin_test.rs +++ b/bin/citrea/tests/bitcoin/bitcoin_test.rs @@ -4,13 +4,13 @@ use std::time::Duration; use anyhow::bail; use async_trait::async_trait; use bitcoin::hashes::Hash; -use bitcoin::{Amount, Txid}; +use bitcoin::Txid; use bitcoin_da::monitoring::TxStatus; use 
bitcoin_da::rpc::DaRpcClient; -use bitcoincore_rpc::{Client, RpcApi}; +use bitcoincore_rpc::RpcApi; use citrea_batch_prover::rpc::BatchProverRpcClient; use citrea_e2e::bitcoin::{BitcoinNode, DEFAULT_FINALITY_DEPTH}; -use citrea_e2e::config::{BitcoinConfig, TestCaseConfig}; +use citrea_e2e::config::TestCaseConfig; use citrea_e2e::framework::TestFramework; use citrea_e2e::node::NodeKind; use citrea_e2e::test_case::{TestCase, TestCaseRunner}; @@ -491,89 +491,89 @@ async fn test_cpfp_fee_bump() -> Result<()> { .await } -struct MinRelayFeeTest; - -impl MinRelayFeeTest { - async fn drain_wallet( - &self, - da: &BitcoinNode, - client: &Client, - amount_to_keep: Amount, - ) -> Result<()> { - let balance = da.get_balance(None, None).await?; - - let amount_to_send = balance - amount_to_keep; - - if amount_to_send <= Amount::ZERO { - return Ok(()); - } - - let drain_address = da.get_new_address(None, None).await?.assume_checked(); - - client - .send_to_address( - &drain_address, - amount_to_send, - None, - None, - None, - None, - None, - None, - ) - .await?; - da.generate(1).await?; - - Ok(()) - } -} - -#[async_trait] -impl TestCase for MinRelayFeeTest { - fn test_config() -> TestCaseConfig { - TestCaseConfig { - with_sequencer: true, - with_batch_prover: false, - ..Default::default() - } - } - - fn bitcoin_config() -> BitcoinConfig { - BitcoinConfig { - extra_args: vec!["-fallbackfee=0.00001", "-minrelaytxfee=0.00002"], - ..Default::default() - } - } - - async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { - let da = f.bitcoin_nodes.get(0).unwrap(); - let sequencer = f.sequencer.as_mut().unwrap(); - - self.drain_wallet(da, &sequencer.da, Amount::from_sat(8000)) - .await?; - - let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); - - // Generate seqcommitments - for _ in 0..max_l2_blocks_per_commitment { - sequencer.client.send_publish_batch_request().await?; - } - - da.wait_mempool_len(2, None).await?; - - // Assert that we hit 
MinRelayFeeNotMet error but recover and end up sending the tx by increasing fee_rate_multiplier - let sequencer_stdout = - std::fs::read_to_string(sequencer.config.base.dir.join("stdout.log"))?; - assert!(sequencer_stdout.contains("MinRelayFeeNotMet")); - - Ok(()) - } -} - -#[tokio::test] -async fn test_min_relay_fee_handling() -> Result<()> { - TestCaseRunner::new(MinRelayFeeTest) - .set_citrea_path(get_citrea_path()) - .run() - .await -} +// struct MinRelayFeeTest; + +// impl MinRelayFeeTest { +// async fn drain_wallet( +// &self, +// da: &BitcoinNode, +// client: &Client, +// amount_to_keep: Amount, +// ) -> Result<()> { +// let balance = da.get_balance(None, None).await?; + +// let amount_to_send = balance - amount_to_keep; + +// if amount_to_send <= Amount::ZERO { +// return Ok(()); +// } + +// let drain_address = da.get_new_address(None, None).await?.assume_checked(); + +// client +// .send_to_address( +// &drain_address, +// amount_to_send, +// None, +// None, +// None, +// None, +// None, +// None, +// ) +// .await?; +// da.generate(1).await?; + +// Ok(()) +// } +// } + +// #[async_trait] +// impl TestCase for MinRelayFeeTest { +// fn test_config() -> TestCaseConfig { +// TestCaseConfig { +// with_sequencer: true, +// with_batch_prover: false, +// ..Default::default() +// } +// } + +// fn bitcoin_config() -> BitcoinConfig { +// BitcoinConfig { +// extra_args: vec!["-fallbackfee=0.00001", "-minrelaytxfee=0.00002"], +// ..Default::default() +// } +// } + +// async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { +// let da = f.bitcoin_nodes.get(0).unwrap(); +// let sequencer = f.sequencer.as_mut().unwrap(); + +// self.drain_wallet(da, &sequencer.da, Amount::from_sat(8000)) +// .await?; + +// let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); + +// // Generate seqcommitments +// for _ in 0..max_l2_blocks_per_commitment { +// sequencer.client.send_publish_batch_request().await?; +// } + +// da.wait_mempool_len(2, 
None).await?; + +// // Assert that we hit MinRelayFeeNotMet error but recover and end up sending the tx by increasing fee_rate_multiplier +// let sequencer_stdout = +// std::fs::read_to_string(sequencer.config.base.dir.join("stdout.log"))?; +// assert!(sequencer_stdout.contains("MinRelayFeeNotMet")); + +// Ok(()) +// } +// } + +// #[tokio::test] +// async fn test_min_relay_fee_handling() -> Result<()> { +// TestCaseRunner::new(MinRelayFeeTest) +// .set_citrea_path(get_citrea_path()) +// .run() +// .await +// } diff --git a/bin/citrea/tests/bitcoin/bitcoin_verifier.rs b/bin/citrea/tests/bitcoin/bitcoin_verifier.rs index a97674338b..978d8e88b7 100644 --- a/bin/citrea/tests/bitcoin/bitcoin_verifier.rs +++ b/bin/citrea/tests/bitcoin/bitcoin_verifier.rs @@ -744,7 +744,7 @@ impl BitcoinVerifierTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_bitcoin_verifier() -> Result<()> { TestCaseRunner::new(BitcoinVerifierTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/da_job.rs b/bin/citrea/tests/bitcoin/da_job.rs new file mode 100644 index 0000000000..580fd577a0 --- /dev/null +++ b/bin/citrea/tests/bitcoin/da_job.rs @@ -0,0 +1,837 @@ +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; + +use alloy_primitives::{U32, U64}; +use async_trait::async_trait; +use bitcoin::hashes::Hash; +use bitcoin_da::job::rpc::{DaJobRpcClient, JobInfoResponse, JobStatusFilter, RetryJobResponse}; +use bitcoin_da::service::BitcoinService; +use bitcoincore_rpc::RpcApi; +use citrea_batch_prover::rpc::BatchProverRpcClient; +use citrea_e2e::bitcoin::{BitcoinNode, DEFAULT_FINALITY_DEPTH}; +use citrea_e2e::config::{BatchProverConfig, BitcoinConfig, TestCaseConfig}; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::node::BatchProver; +use citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use citrea_e2e::traits::Restart; +use citrea_e2e::Result; +use jsonrpsee::http_client::HttpClient; +use 
reth_tasks::TaskManager; +use sov_db::schema::types::da_jobs::DaJobStatus; +use sov_ledger_rpc::LedgerRpcClient; +use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; + +use super::get_citrea_path; +use crate::bitcoin::full_node::create_serialized_fake_receipt_batch_proof_with_state_roots; +use crate::bitcoin::light_client_test::create_random_state_diff; +use crate::bitcoin::utils::{ + create_serialized_fake_receipt_batch_proof_and_serialized_output, + spawn_bitcoin_da_prover_service_with_rpc_server, wait_for_prover_job_count, +}; + +struct JobServiceTest { + task_manager: Option, +} + +impl JobServiceTest { + #[allow(clippy::too_many_arguments)] + async fn test_job_lifecycle( + &self, + da: &BitcoinNode, + da_service: &BitcoinService, + da_service_client: &HttpClient, + genesis_state_root: [u8; 32], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let state_diff = create_random_state_diff(10); + let l1_hash = da.get_block_hash(finalized_height).await?; + + let proof = create_serialized_fake_receipt_batch_proof_with_state_roots( + genesis_state_root, + 20, + Some(state_diff), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + // Make sure we start with no jobs + let all_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::All), None, None) + .await?; + assert!(all_jobs.is_empty()); + + let job_id = da_service + .send_transaction_and_wait(DaTxRequest::ZKProof(proof)) + .await?; + + da.wait_mempool_len(2, None).await?; + da.generate(1).await?; + + // Check that job is not active anymore and has been processed + let active_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert!(active_jobs.is_empty()); + + // Check Completed status + let completed_jobs = da_service_client + 
.da_job_list(Some(JobStatusFilter::Completed), None, None) + .await?; + assert_eq!(completed_jobs.len(), 1); + + let completed_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::Terminal), None, None) + .await?; + assert_eq!(completed_jobs.len(), 1); + + let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + + assert_eq!(job_by_id.status, DaJobStatus::Completed); + assert_eq!(job_by_id.sent_count, 1); + assert_eq!(job_by_id.error, None); + + Ok(()) + } + + /// Test job cancellation for in-progress jobs + /// Test job retry for cancelled jobs + #[allow(clippy::too_many_arguments)] + async fn test_job_cancellation_and_retry( + &self, + da: &BitcoinNode, + da_service: &BitcoinService, + da_service_client: &HttpClient, + genesis_state_root: [u8; 32], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let l1_hash = da.get_block_hash(finalized_height).await?; + + // Create a 400kb proof that will hit mempool limits and get stuck in progress + let state_diff_100kb = create_random_state_diff(400); + let proof = create_serialized_fake_receipt_batch_proof_with_state_roots( + genesis_state_root, + 20, + Some(state_diff_100kb), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + let (job_id, rx) = da_service + .send_transaction(DaTxRequest::ZKProof(proof.clone())) + .await?; + + // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit + // The three first proofs should hit the mempool + 1 chunk + da.wait_mempool_len(18, None).await?; + + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(job_by_id.status, DaJobStatus::InProgress); + assert_eq!(job_by_id.sent_count, 9); // 9 commit/reveal pair + + // Cancel job + let cancel_job_response = 
da_service_client.da_job_cancel(job_id).await?; + assert!(cancel_job_response.success); + + let job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(job_by_id.status, DaJobStatus::Cancelled); + + // Mine sent txs + da.generate(1).await?; + + // Make sure job doesn't get processed after freeing space in mempool + let res = rx.await.unwrap(); + assert!(res.is_err()); + + let retry_job_response: RetryJobResponse = da_service_client.da_job_retry(job_id).await?; + + let old_job_by_id: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(old_job_by_id.status, DaJobStatus::Cancelled); + + let new_job_by_id: JobInfoResponse = da_service_client + .da_job_get_info(retry_job_response.new_job_id) + .await?; + assert_eq!(new_job_by_id.status, DaJobStatus::Pending); + da.generate(1).await?; + + // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit + // The three first proofs should hit the mempool + 1 chunk + da.wait_mempool_len(18, None).await?; + + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let new_job_by_id: JobInfoResponse = da_service_client + .da_job_get_info(retry_job_response.new_job_id) + .await?; + assert_eq!(new_job_by_id.status, DaJobStatus::InProgress); + da.generate(1).await?; + + // TODO find a way to deterministically wait for retry completion + tokio::time::sleep(Duration::from_secs(3)).await; + + let new_job_by_id: JobInfoResponse = da_service_client + .da_job_get_info(retry_job_response.new_job_id) + .await?; + assert_eq!(new_job_by_id.status, DaJobStatus::Completed); + + Ok(()) + } + + /// Test job listing with various filters and pagination + #[allow(clippy::too_many_arguments)] + async fn test_job_listing( + &self, + da: &BitcoinNode, + da_service: &BitcoinService, + da_service_client: &HttpClient, + genesis_state_root: [u8; 32], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let 
state_diff = create_random_state_diff(400); + let l1_hash = da.get_block_hash(finalized_height).await?; + + let proof = create_serialized_fake_receipt_batch_proof_with_state_roots( + genesis_state_root, + 20, + Some(state_diff), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + // Create multiple jobs to check list handling + let (_, rx) = da_service + .send_transaction(DaTxRequest::ZKProof(proof.clone())) + .await?; + + da.wait_mempool_len(18, None).await?; + + // List all jobs + let all_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::All), None, None) + .await?; + assert!(all_jobs.len() >= 3); + + // List active jobs + let active_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs.len(), 1); + + // List cancelled jobs + let cancelled_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::Cancelled), None, None) + .await?; + assert_eq!(cancelled_jobs.len(), 1); + + // List failed jobs + let failed_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::Failed), None, None) + .await?; + assert_eq!(failed_jobs.len(), 0); + + // Test pagination + let first_page = da_service_client + .da_job_list(Some(JobStatusFilter::All), Some(1), Some(0)) + .await?; + assert_eq!(first_page.len(), 1); + + // Test pagination + let second_page = da_service_client + .da_job_list(Some(JobStatusFilter::All), Some(1), Some(1)) + .await?; + assert_eq!(second_page.len(), 1); + + // Make sure we don't get the same job_id + assert_ne!(first_page[0].job_id, second_page[0].job_id); + + // Verify uuidv7 chronological ordering + assert!(first_page[0].created_at <= second_page[0].created_at,); + + // Test limit + let limited_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::All), Some(2), None) + .await?; + assert_eq!(limited_jobs.len(), 2); + + // Mine all sent txs + da.generate(1).await?; + + let res = 
rx.await.unwrap(); + assert!(res.is_ok()); + + // Verify completed jobs + let completed_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::Completed), None, None) + .await?; + assert_eq!(completed_jobs.len(), 3); + + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn test_job_persistence( + &mut self, + da: &BitcoinNode, + da_service: Arc, + da_service_client: HttpClient, + genesis_state_root: [u8; 32], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let l1_hash = da.get_block_hash(finalized_height).await?; + let state_diff_400kb = create_random_state_diff(400); + let proof = create_serialized_fake_receipt_batch_proof_with_state_roots( + genesis_state_root, + 20, + Some(state_diff_400kb), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + let (job_id, _) = da_service + .send_transaction(DaTxRequest::ZKProof(proof)) + .await?; + + da.wait_mempool_len(18, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let job_before: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(job_before.job_id, job_id); + assert_eq!(job_before.status, DaJobStatus::InProgress); + assert_eq!(job_before.sent_count, 9); + + let active_jobs_before = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_before.len(), 1); + assert_eq!(active_jobs_before[0].job_id, job_id); + + // Send graceful shutdown to da_service and drop da_service + drop(da_service); + drop(da_service_client); + self.task_manager.take().unwrap().graceful_shutdown(); + tokio::time::sleep(Duration::from_secs(5)).await; + + // Create a new task_manager as previous was consumed + self.task_manager = Some(TaskManager::current()); + let task_executor = self.task_manager.as_ref().unwrap().executor(); + + let (_, da_service_client) = 
spawn_bitcoin_da_prover_service_with_rpc_server( + &task_executor, + &da.config, + Self::test_config().dir, + ) + .await; + + let job_after: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + + assert_eq!(job_after.job_id, job_before.job_id); + assert_eq!(job_after.status, job_before.status); + assert_eq!(job_after.created_at, job_before.created_at); + assert_eq!(job_after.sent_count, job_before.sent_count); + + let active_jobs_after = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_after.len(), 1); + assert_eq!(active_jobs_after[0].job_id, job_id); + assert_eq!(active_jobs_after[0].status, DaJobStatus::InProgress); + + da.generate(1).await?; + + da.wait_mempool_len(6, None).await?; + + let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(completed_job.status, DaJobStatus::Completed); + assert_eq!(completed_job.created_at, job_before.created_at); + assert_eq!(completed_job.error, None); + + let active_jobs_final = da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_final.len(), 0); + + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn test_job_error_recovery( + &mut self, + da: &mut BitcoinNode, + tx_backup_dir: PathBuf, + da_service: &Arc, + da_service_client: &HttpClient, + genesis_state_root: [u8; 32], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let l1_hash = da.get_block_hash(finalized_height).await?; + let state_diff_400kb = create_random_state_diff(400); + let proof = create_serialized_fake_receipt_batch_proof_with_state_roots( + genesis_state_root, + 20, + Some(state_diff_400kb), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + let (job_id, _) = da_service + .send_transaction(DaTxRequest::ZKProof(proof)) + 
.await?; + + da.wait_mempool_len(18, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let job_before: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(job_before.job_id, job_id); + assert_eq!(job_before.status, DaJobStatus::InProgress); + assert_eq!(job_before.sent_count, 9); + + // Make `tx_backup_dir` read-only to trigger a failure. + // Should make the next job processing fail with `There are no UTXOs` + let metadata = tokio::fs::metadata(&tx_backup_dir).await?; + let mut permissions = metadata.permissions(); + + // Keep original perms for resetting + let original_perms = permissions.clone(); + + permissions.set_readonly(true); + tokio::fs::set_permissions(&tx_backup_dir, permissions.clone()).await?; + + // Mine chunks + da.generate(1).await?; + + // Wait for job processing + std::thread::sleep(std::time::Duration::from_millis(1000)); + + let in_progress_jobs = da_service_client + .da_job_list(Some(JobStatusFilter::InProgress), None, None) + .await?; + assert_eq!(in_progress_jobs.len(), 1); + assert_eq!(in_progress_jobs[0].job_id, job_id); + + let failed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert!(matches!(failed_job.status, DaJobStatus::InProgress)); + assert_eq!(failed_job.created_at, job_before.created_at); + assert_eq!( + failed_job.error, + Some( + "Failed to backup transactions to file: Permission denied (os error 13)" + .to_string() + ) + ); + + // Reset permissions + tokio::fs::set_permissions(&tx_backup_dir, original_perms).await?; + + // Trigger job processing + da.generate(1).await?; + + da.wait_mempool_len(6, None).await?; + + let completed_job: JobInfoResponse = da_service_client.da_job_get_info(job_id).await?; + assert_eq!(completed_job.status, DaJobStatus::Completed); + assert_eq!(completed_job.created_at, job_before.created_at); + assert_eq!(completed_job.sent_count, 12); + assert_eq!(completed_job.error, None); + + let active_jobs_final = 
da_service_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_final.len(), 0); + Ok(()) + } +} + +#[async_trait] +impl TestCase for JobServiceTest { + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_full_node: true, + with_sequencer: true, + with_light_client_prover: true, + ..Default::default() + } + } + + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-limitancestorcount=100", + "-limitdescendantcount=100", + "-fallbackfee=0.00001", + ], + ..Default::default() + } + } + + fn scan_l1_start_height() -> Option { + Some(150) + } + + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + proof_sampling_number: 999_999_999, + ..Default::default() + } + } + + async fn cleanup(self) -> Result<()> { + self.task_manager + .unwrap() + .graceful_shutdown_with_timeout(Duration::from_secs(1)); + Ok(()) + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let task_executor = self.task_manager.as_ref().unwrap().executor(); + let da = f.bitcoin_nodes.get_mut(0).unwrap(); + let sequencer = f.sequencer.as_mut().unwrap(); + let full_node = f.full_node.as_mut().unwrap(); + + let test_dir = Self::test_config().dir; + let tx_backup_dir = test_dir.join("tx_backup_dir"); + + // Common setup + let (da_service, da_service_client) = + spawn_bitcoin_da_prover_service_with_rpc_server(&task_executor, &da.config, test_dir) + .await; + + let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); + + let genesis_state_root = full_node + .client + .http_client() + .get_l2_genesis_state_root() + .await? 
+ .unwrap() + .0 + .try_into() + .unwrap(); + + // Generate sequencer commitment + for _ in 0..max_l2_blocks_per_commitment { + sequencer.client.send_publish_batch_request().await?; + } + + da.wait_mempool_len(2, None).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height(None).await?; + + full_node + .wait_for_l2_height(max_l2_blocks_per_commitment, None) + .await?; + full_node.wait_for_l1_height(finalized_height, None).await?; + + let commitment = full_node + .client + .http_client() + .get_sequencer_commitment_by_index(U32::from(1)) + .await? + .map(|c| SequencerCommitment { + merkle_root: c.merkle_root, + l2_end_block_number: c.l2_end_block_number.to::(), + index: c.index.to::(), + }) + .unwrap(); + + let commitment_state_root = sequencer + .client + .http_client() + .get_l2_block_by_number(U64::from(commitment.l2_end_block_number)) + .await? + .unwrap() + .header + .state_root; + + self.test_job_lifecycle( + da, + &da_service, + &da_service_client, + genesis_state_root, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_cancellation_and_retry( + da, + &da_service, + &da_service_client, + genesis_state_root, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_listing( + da, + &da_service, + &da_service_client, + genesis_state_root, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_error_recovery( + da, + tx_backup_dir, + &da_service, + &da_service_client, + genesis_state_root, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + // Clean mempool between each step + da.generate(1).await?; + + self.test_job_persistence( + da, + da_service, + da_service_client, + 
genesis_state_root, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + Ok(()) + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_bitcoin_job_service() -> Result<()> { + TestCaseRunner::new(JobServiceTest { + task_manager: Some(TaskManager::current()), + }) + .set_citrea_path(get_citrea_path()) + .run() + .await +} + +struct BatchProverRecoveryJobServiceTest; + +impl BatchProverRecoveryJobServiceTest { + #[allow(clippy::too_many_arguments)] + async fn test_batch_prover_da_job_recovery( + &mut self, + da: &BitcoinNode, + batch_prover: &mut BatchProver, + genesis_state_root: [u8; 32], + finalized_height: u64, + commitment: &SequencerCommitment, + commitment_state_root: [u8; 32], + ) -> Result<()> { + let batch_prover_client = batch_prover.client.http_client().clone(); + + let l1_hash = da.get_block_hash(finalized_height).await?; + // Create 400kb proof that should be chunked and sent over multiple bitcoin blocks + let state_diff_400kb = create_random_state_diff(400); + let (proof, output) = create_serialized_fake_receipt_batch_proof_and_serialized_output( + genesis_state_root, + 20, + Some(state_diff_400kb), + false, + l1_hash.as_raw_hash().to_byte_array(), + vec![commitment.clone()], + vec![commitment_state_root], + None, + ); + + let job_id = batch_prover_client + .submit_proof_with_output(proof, output) + .await?; + + wait_for_prover_job_count(batch_prover, 1, None).await?; + + da.wait_mempool_len(18, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 18); + + let job_in_progress: JobInfoResponse = batch_prover_client.da_job_get_info(job_id).await?; + assert_eq!(job_in_progress.job_id, job_id); + assert_eq!(job_in_progress.status, DaJobStatus::InProgress); + assert_eq!(job_in_progress.sent_count, 9); + + let active_jobs_before = batch_prover_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_before.len(), 1); + assert_eq!(active_jobs_before[0].job_id, job_id); + + 
batch_prover.restart(None, None).await?; + + // Assert that restart doesn't create any new job + let active_jobs_after_restart = batch_prover_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_after_restart.len(), 1); + assert_eq!(active_jobs_after_restart[0].job_id, job_id); + + da.generate(1).await?; + + da.wait_mempool_len(6, None).await?; + + let completed_job: JobInfoResponse = batch_prover_client.da_job_get_info(job_id).await?; + assert_eq!(completed_job.status, DaJobStatus::Completed); + assert_eq!(completed_job.error, None); + + let active_jobs_final = batch_prover_client + .da_job_list(Some(JobStatusFilter::Active), None, None) + .await?; + assert_eq!(active_jobs_final.len(), 0); + + Ok(()) + } +} + +#[async_trait] +impl TestCase for BatchProverRecoveryJobServiceTest { + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_full_node: true, + with_sequencer: true, + with_light_client_prover: true, + with_batch_prover: true, + ..Default::default() + } + } + + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-limitancestorcount=100", + "-limitdescendantcount=100", + "-fallbackfee=0.00001", + ], + ..Default::default() + } + } + + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + proof_sampling_number: 99999999, // Prevent prover from proving on its own + ..Default::default() + } + } + + fn scan_l1_start_height() -> Option { + Some(150) + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let da = f.bitcoin_nodes.get_mut(0).unwrap(); + let sequencer = f.sequencer.as_mut().unwrap(); + let full_node = f.full_node.as_mut().unwrap(); + let batch_prover = f.batch_prover.as_mut().unwrap(); + + let max_l2_blocks_per_commitment = sequencer.max_l2_blocks_per_commitment(); + + let genesis_state_root = full_node + .client + .http_client() + .get_l2_genesis_state_root() + .await? 
+ .unwrap() + .0 + .try_into() + .unwrap(); + + // Generate sequencer commitment + for _ in 0..max_l2_blocks_per_commitment { + sequencer.client.send_publish_batch_request().await?; + } + + da.wait_mempool_len(2, None).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height(None).await?; + + full_node + .wait_for_l2_height(max_l2_blocks_per_commitment, None) + .await?; + full_node.wait_for_l1_height(finalized_height, None).await?; + + let commitment = full_node + .client + .http_client() + .get_sequencer_commitment_by_index(U32::from(1)) + .await? + .map(|c| SequencerCommitment { + merkle_root: c.merkle_root, + l2_end_block_number: c.l2_end_block_number.to::(), + index: c.index.to::(), + }) + .unwrap(); + + let commitment_state_root = sequencer + .client + .http_client() + .get_l2_block_by_number(U64::from(commitment.l2_end_block_number)) + .await? + .unwrap() + .header + .state_root; + + self.test_batch_prover_da_job_recovery( + da, + batch_prover, + genesis_state_root, + finalized_height, + &commitment, + commitment_state_root, + ) + .await?; + + Ok(()) + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_batch_prover_job_service_recovery() -> Result<()> { + TestCaseRunner::new(BatchProverRecoveryJobServiceTest {}) + .set_citrea_path(get_citrea_path()) + .run() + .await +} diff --git a/bin/citrea/tests/bitcoin/da_queue.rs b/bin/citrea/tests/bitcoin/da_queue.rs index 960c73dace..771402e863 100644 --- a/bin/citrea/tests/bitcoin/da_queue.rs +++ b/bin/citrea/tests/bitcoin/da_queue.rs @@ -14,8 +14,8 @@ use citrea_e2e::test_case::{TestCase, TestCaseRunner}; use citrea_e2e::Result; use reth_tasks::TaskManager; use sov_ledger_rpc::LedgerRpcClient; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use 
super::light_client_test::create_random_state_diff; use super::{get_citrea_cli_path, get_citrea_path}; @@ -60,50 +60,39 @@ impl DaTransactionQueueingTest { // Fill mempool for i in 1..=3 { da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::ZKProof( + verifiable_100kb_batch_proof.clone(), + )) .await?; da.wait_mempool_len(8 * i, None).await?; } da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1.0, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit // The three first proofs should hit the mempool + 1 chunk da.wait_mempool_len(8 * 3 + 2, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 26); - // Assert that all queued txs are monitored + // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 32); + assert_eq!(monitored_txs.len(), 26); // Try to send when queue is already filled up. // This is to test that utxos is correctly selected and that it's doesn't hang on waiting for list of queued txids to be returned let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await; - assert!(matches!(res, Err(BitcoinServiceError::QueueNotEmpty))); + assert!(matches!( + res, + Err(BitcoinServiceError::PreviousJobInProgress) + )); - // Send transaction hangs until a new block is detected - // Tests that transactions properly waits for block notification - tokio::select! 
{ - _ = tokio::time::sleep(std::time::Duration::from_secs(2)) => { - da.generate(1).await?; - } - _ = da_service.send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) => { - } - } + da.generate(1).await?; // We mine the first three proofs + the 1 chunk pair and make sure that the remaining chunks and aggregate // and the extra proof is properly queued and sent on next block when mempool size is freed @@ -117,6 +106,15 @@ impl DaTransactionQueueingTest { assert_eq!(relevant_txs.len(), 13); + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + // Send additional proof and make sure it doesn't hit PreviousJobInProgress error + let res = da_service + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) + .await; + + assert!(res.is_ok()); + + assert_eq!(da.get_raw_mempool().await?.len(), 8 + 6); // Remaining chunks and aggregate + extra queued proof should now hit the mempool da.wait_mempool_len(8 + 6, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 8 + 6); @@ -165,25 +163,22 @@ impl DaTransactionQueueingTest { // This over the mempool limit proof should be accepted and split up over multiple blocks let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone()), - 1.0, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone())) .await; assert!(res.is_ok()); // Queue is already not empty and proof cannot be sent. 
let res = da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_400kb_batch_proof), 1.0) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof)) .await; assert!(res.is_err()); da.wait_mempool_len(18, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 18); - // Assert that all queued txs are monitored + // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 64); + assert_eq!(monitored_txs.len(), 58); da.generate(1).await?; // Assert that all chunks were mined and mempool space is freed @@ -351,7 +346,7 @@ impl TestCase for DaTransactionQueueingTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_queue_da_transactions() -> Result<()> { TestCaseRunner::new(DaTransactionQueueingTest { task_manager: TaskManager::current(), @@ -398,19 +393,16 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { // Fill mempool for i in 1..=3 { da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::ZKProof( + verifiable_100kb_batch_proof.clone(), + )) .await?; + da.wait_mempool_len(8 * i, None).await?; } da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1.0, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await?; // Last tx chunk should hit mempool policy `DEFAULT_DESCENDANT_SIZE_LIMIT_KVB` limit @@ -418,27 +410,24 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { da.wait_mempool_len(8 * 3 + 2, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 26); - // Assert that all queued txs are monitored + // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 32); + assert_eq!(monitored_txs.len(), 26); // Try to send when 
queue is already filled up. // This is to test that utxos is correctly selected and that it's doesn't hang on waiting for list of queued txids to be returned let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone()), - 1.0, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_100kb_batch_proof.clone())) .await; assert!(res.is_ok()); - let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 40); - // Txs starting from a new chain should be accepted to mempool da.wait_mempool_len(8 * 3 + 2 + 8, None).await?; + let monitored_txs = da_service.monitoring.get_monitored_txs().await; + assert_eq!(monitored_txs.len(), 34); + // We mine the first three proofs + the 1 chunk pair + the extra proof starting another UTXO chain // and make sure that the remaining chunks and aggregate and sent on next block when mempool size is freed // Assert that all chunks were mined and mempool space is freed @@ -500,27 +489,25 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { // This over the mempool limit proof should be accepted and split up over multiple blocks let res = da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone()), - 1.0, - ) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof.clone())) .await; assert!(res.is_ok()); // Should be able to send another proof that is also split up over multiple blocks let res = da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_400kb_batch_proof), 1.0) + .send_transaction(DaTxRequest::ZKProof(verifiable_400kb_batch_proof)) .await; assert!(res.is_ok()); da.wait_mempool_len(18 * 2, None).await?; assert_eq!(da.get_raw_mempool().await?.len(), 18 * 2); - // Assert that all queued txs are monitored + // Assert that all sent txs are monitored let monitored_txs = da_service.monitoring.get_monitored_txs().await; - assert_eq!(monitored_txs.len(), 
88); + assert_eq!(monitored_txs.len(), 76); da.generate(1).await?; + // Assert that all chunks were mined and mempool space is freed assert_eq!(da.get_raw_mempool().await?.len(), 0); @@ -534,8 +521,10 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { let rollback_first_hash = hash; da.wait_mempool_len(6 * 2, None).await?; + assert_eq!(da.get_raw_mempool().await?.len(), 6 * 2); da.generate(1).await?; + // Assert that all chunks and aggregate were mined assert_eq!(da.get_raw_mempool().await?.len(), 0); @@ -556,6 +545,7 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { let dropped_txs = &da.get_raw_mempool().await?[2..]; da.invalidate_block(&rollback_first_hash).await?; + // Should be (6 + 18) * 2 if all mined txs were restored to mempool but 5 * 2 txs are dropped due to being over mempool policy limit assert_eq!(da.get_raw_mempool().await?.len(), (18 + 1) * 2); let remaining_txs = da.get_raw_mempool().await?; @@ -566,6 +556,7 @@ impl DaTransactionQueueingUtxoSelectionModeOldestTest { // Make sure txs are rebroadcasted from monitoring service da.wait_mempool_len(5 * 2, None).await?; + let raw_mempool = da.get_raw_mempool().await?; assert_eq!(dropped_txs, raw_mempool); @@ -641,6 +632,7 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest { } da.wait_mempool_len(2, None).await?; + da.generate(DEFAULT_FINALITY_DEPTH).await?; let finalized_height = da.get_finalized_height(None).await?; @@ -688,11 +680,12 @@ impl TestCase for DaTransactionQueueingUtxoSelectionModeOldestTest { commitment_1_state_root, ) .await?; + Ok(()) } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_queue_da_transactions_oldest_mode() -> Result<()> { TestCaseRunner::new(DaTransactionQueueingUtxoSelectionModeOldestTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/full_node.rs b/bin/citrea/tests/bitcoin/full_node.rs index d7e3bd942e..84803a8ec0 100644 --- a/bin/citrea/tests/bitcoin/full_node.rs +++ 
b/bin/citrea/tests/bitcoin/full_node.rs @@ -22,8 +22,9 @@ use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, ReceiptClaim}; use sov_db::schema::types::L2HeightAndIndex; use sov_ledger_rpc::LedgerRpcClient; use sov_modules_api::BatchProofCircuitOutputV3; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; use sov_rollup_interface::rpc::block::L2BlockResponse; +use sov_rollup_interface::services::da::DaTxRequest; use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use tokio::time::sleep; @@ -130,7 +131,7 @@ impl TestCase for PreStateRootMismatchTest { // Send the first proof prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof)) .await .unwrap(); @@ -190,7 +191,7 @@ impl TestCase for PreStateRootMismatchTest { // Send the invalid proof prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(invalid_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(invalid_proof)) .await .unwrap(); @@ -240,7 +241,7 @@ impl TestCase for PreStateRootMismatchTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_pre_state_root_mismatch() -> Result<()> { TestCaseRunner::new(PreStateRootMismatchTest { task_manager: TaskManager::current(), @@ -327,10 +328,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { // Send the `correct_commitment` so it's stored and will trigger the pre-hash mismatch against `wrong_commitment` sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(correct_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(correct_commitment.clone())) .await .unwrap(); @@ -384,7 +382,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { None, ); prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(fake_proof), 
1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(fake_proof)) .await .unwrap(); @@ -418,7 +416,7 @@ impl TestCase for SequencerCommitmentHashMismatchTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_sequencer_commitment_hash_mismatch() -> Result<()> { TestCaseRunner::new(SequencerCommitmentHashMismatchTest { task_manager: TaskManager::current(), @@ -473,10 +471,9 @@ impl TestCase for PendingCommitmentHaltingErrorTest { }; bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(wrong_merkle_root_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + wrong_merkle_root_commitment.clone(), + )) .await .unwrap(); @@ -545,7 +542,7 @@ impl TestCase for PendingCommitmentHaltingErrorTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_halting_pending_commitment_merkle_root_mismatch() -> Result<()> { TestCaseRunner::new(PendingCommitmentHaltingErrorTest { task_manager: TaskManager::current(), @@ -937,10 +934,9 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the zero index commitment first, should be ignored bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(zero_index_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + zero_index_commitment.clone(), + )) .await .unwrap(); @@ -963,10 +959,7 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the second commitment first bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(second_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(second_commitment.clone())) .await .unwrap(); @@ -990,10 +983,7 @@ impl TestCase for OutOfOrderCommitmentsTest { // Send the first commitment bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(first_commitment.clone()), - 1.0, - ) + 
.send_transaction_and_wait(DaTxRequest::SequencerCommitment(first_commitment.clone())) .await .unwrap(); @@ -1026,7 +1016,7 @@ impl TestCase for OutOfOrderCommitmentsTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_out_of_order_commitments() -> Result<()> { TestCaseRunner::new(OutOfOrderCommitmentsTest { task_manager: TaskManager::current(), @@ -1123,10 +1113,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send commitment A bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_a.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_a.clone())) .await .unwrap(); @@ -1148,10 +1135,9 @@ impl TestCase for ConflictingCommitmentsTest { // Send conflicting commitment with different merkle root, should be ignored bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(conflicting_commitment_different_root.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + conflicting_commitment_different_root.clone(), + )) .await .unwrap(); @@ -1174,10 +1160,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send conflicting commitment B bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_b.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_b.clone())) .await .unwrap(); @@ -1222,10 +1205,7 @@ impl TestCase for ConflictingCommitmentsTest { // Send commitment C that follows A bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_c.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_c.clone())) .await .unwrap(); @@ -1252,7 +1232,7 @@ impl TestCase for ConflictingCommitmentsTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_conflicting_commitments() -> Result<()> { 
TestCaseRunner::new(ConflictingCommitmentsTest { task_manager: TaskManager::current(), @@ -1502,7 +1482,7 @@ impl TestCase for OutOfRangeProofTest { // Send the proof first. It should be discard as none of its commitments exist prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof1.clone()), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof1.clone())) .await .unwrap(); @@ -1523,10 +1503,7 @@ impl TestCase for OutOfRangeProofTest { ); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); @@ -1555,10 +1532,7 @@ impl TestCase for OutOfRangeProofTest { assert!(proven_height.is_none(), "Proof should have been discarded"); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); @@ -1631,18 +1605,12 @@ impl TestCase for OutOfRangeProofTest { full_node.start(None, None).await?; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); @@ -1665,7 +1633,7 @@ impl TestCase for OutOfRangeProofTest { // Send the proof first. 
It should be processed as its commitments exist prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof1), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof1)) .await .unwrap(); @@ -1687,18 +1655,12 @@ impl TestCase for OutOfRangeProofTest { // Send commitments for proof 2 and proof 3 sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment4.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment4.clone())) .await .unwrap(); @@ -1750,7 +1712,7 @@ impl TestCase for OutOfRangeProofTest { ); // Send the third proof first. It should be set as pending as its commitments exist but it's starting commitment index is not proven proof last commitment index + 1 prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof3), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof3)) .await .unwrap(); @@ -1808,7 +1770,7 @@ impl TestCase for OutOfRangeProofTest { // Now send the second proof. 
It should be processed and trigger a processing of pending proof3 prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof2), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof2)) .await .unwrap(); @@ -1851,7 +1813,7 @@ impl TestCase for OutOfRangeProofTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_out_of_range_proof() -> Result<()> { TestCaseRunner::new(OutOfRangeProofTest { task_manager: TaskManager::current(), @@ -2035,10 +1997,7 @@ impl TestCase for OverlappingProofRangesTest { full_node.start(None, None).await?; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); @@ -2069,18 +2028,12 @@ impl TestCase for OverlappingProofRangesTest { .state_root; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); @@ -2183,34 +2136,22 @@ impl TestCase for OverlappingProofRangesTest { // Send all 4 commitments in order sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment1.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment1.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment2.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - 
DaTxRequest::SequencerCommitment(commitment3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment3.clone())) .await .unwrap(); sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment4.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment4.clone())) .await .unwrap(); @@ -2255,7 +2196,7 @@ impl TestCase for OverlappingProofRangesTest { // Send proof_a over commitments [1,2,3] prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof_a.clone()), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof_a.clone())) .await .unwrap(); @@ -2341,7 +2282,7 @@ impl TestCase for OverlappingProofRangesTest { // Send proof_b with overlapping range of [2,3,4] prover_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(proof_b.clone()), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(proof_b.clone())) .await .unwrap(); @@ -2405,7 +2346,7 @@ impl TestCase for OverlappingProofRangesTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_overlapping_proof_ranges() -> Result<()> { TestCaseRunner::new(OverlappingProofRangesTest { task_manager: TaskManager::current(), @@ -2546,10 +2487,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { }; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_1.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_1.clone())) .await .unwrap(); @@ -2592,10 +2530,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { /*------- */ sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_2.clone())) .await .unwrap(); @@ -2637,10 +2572,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { /*------- */ sequencer_da_service - 
.send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(commitment_3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment_3.clone())) .await .unwrap(); @@ -2825,7 +2757,7 @@ impl TestCase for UnsyncedCommitmentL2RangeTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_unsynced_commitment_l2_range_test() -> Result<()> { TestCaseRunner::new(UnsyncedCommitmentL2RangeTest { task_manager: TaskManager::current(), @@ -3542,7 +3474,8 @@ impl TestCase for FullNodeLcpChunkProofTest { Ok(()) } } -#[tokio::test] + +#[tokio::test(flavor = "multi_thread")] async fn test_full_node_lcp_chunk_proof() -> Result<()> { TestCaseRunner::new(FullNodeLcpChunkProofTest { task_manager: TaskManager::current(), @@ -3625,10 +3558,7 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { .await; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(correct_commitment), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(correct_commitment)) .await .unwrap(); @@ -3653,10 +3583,9 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { }; sequencer_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(wrong_merkle_root_commitment), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + wrong_merkle_root_commitment, + )) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -3724,7 +3653,7 @@ impl TestCase for FullNodeL1SyncHaltOnMerkleRootMismatch { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_full_node_l1_sync_halt_on_merkle_root_mismatch() -> Result<()> { TestCaseRunner::new(FullNodeL1SyncHaltOnMerkleRootMismatch { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/light_client_test.rs b/bin/citrea/tests/bitcoin/light_client_test.rs index c26e32bb2f..60fa6f4251 100644 --- a/bin/citrea/tests/bitcoin/light_client_test.rs +++ 
b/bin/citrea/tests/bitcoin/light_client_test.rs @@ -32,11 +32,10 @@ use reth_tasks::TaskManager; use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, ReceiptClaim}; use sov_modules_api::BlobReaderTrait; use sov_rollup_interface::da::{ - BatchProofMethodId, BatchProofMethodIdBody, DaTxRequest, DaVerifier, DataOnDa, - SequencerCommitment, + BatchProofMethodId, BatchProofMethodIdBody, DaVerifier, DataOnDa, SequencerCommitment, }; use sov_rollup_interface::rpc::BatchProofMethodIdRpcResponse; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::batch_proof::output::v3::BatchProofCircuitOutputV3; use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use sov_rollup_interface::zk::ProvingSessionInfo; @@ -685,13 +684,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateTest { let signatures_with_index = create_valid_signatures(&signers, &prehash); bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body, - signatures_with_index, - }), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body, + signatures_with_index, + })) .await .unwrap(); @@ -770,7 +766,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_light_client_batch_proof_method_id_update() -> Result<()> { TestCaseRunner::new(LightClientBatchProofMethodIdUpdateTest { task_manager: TaskManager::current(), @@ -913,13 +909,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { let signatures_with_index = create_valid_signatures(&signers, &prehash); bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body.clone(), - signatures_with_index, - }), - 1.0, - ) 
+ .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -954,13 +947,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index[0].0[0] ^= 0xFF; bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body2.clone(), - signatures_with_index, - }), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body2.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -995,13 +985,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { // Corrupt one signature signatures_with_index[0].1 = signatures_with_index[2].1; bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body3.clone(), - signatures_with_index, - }), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body3.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1035,13 +1022,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { // Corrupt one signature signatures_with_index[2].1 = 5; // out of bounds bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body3.clone(), - signatures_with_index, - }), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body3.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1079,13 +1063,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index[2].1 = tmp; bitcoin_da_service - 
.send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body3.clone(), - signatures_with_index, - }), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body3.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1115,13 +1096,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { let prehash4 = eip191_hash_message(msg4.as_slice()); let signatures_with_index = create_valid_signatures(&signers, &prehash4); bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body4.clone(), - signatures_with_index, - }), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body4.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1155,13 +1133,10 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { signatures_with_index.swap(0, 2); bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::BatchProofMethodId(BatchProofMethodId { - body: method_id_body5.clone(), - signatures_with_index, - }), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(BatchProofMethodId { + body: method_id_body5.clone(), + signatures_with_index, + })) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -1184,7 +1159,7 @@ impl TestCase for LightClientBatchProofMethodIdUpdateSecurityCouncilTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_light_client_batch_proof_method_id_update_security_council() -> Result<()> { TestCaseRunner::new(LightClientBatchProofMethodIdUpdateSecurityCouncilTest { task_manager: TaskManager::current(), @@ -1280,10 +1255,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - 
.send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -1294,10 +1268,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_2.clone(), + )) .await .unwrap(); @@ -1308,10 +1281,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_3.clone(), + )) .await .unwrap(); @@ -1322,10 +1294,9 @@ impl TestCase for LightClientUnverifiableBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_4.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_4.clone(), + )) .await .unwrap(); @@ -1345,7 +1316,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { None, ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1360,7 +1331,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_2.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1376,7 
+1347,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_3.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unparsable_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(unparsable_batch_proof)) .await .unwrap(); @@ -1391,7 +1362,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -1408,7 +1379,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { Some(fake_sequencer_commitment_3.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(unverifiable_batch_proof)) .await .unwrap(); @@ -1444,7 +1415,7 @@ impl TestCase for LightClientUnverifiableBatchProofTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_light_client_unverifiable_batch_proof() -> Result<()> { TestCaseRunner::new(LightClientUnverifiableBatchProofTest { task_manager: TaskManager::current(), @@ -1520,10 +1491,9 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -1534,10 +1504,9 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1.0, - ) + 
.send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -1548,10 +1517,9 @@ impl TestCase for VerifyChunkedTxsInLightClient { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment3.clone(), + )) .await .unwrap(); @@ -1604,7 +1572,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_100kb_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_100kb_batch_proof)) .await .unwrap(); @@ -1665,7 +1633,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_130kb_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_130kb_batch_proof)) .await .unwrap(); @@ -1777,10 +1745,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { Some(fake_sequencer_commitment2.serialize_and_calculate_sha_256()), ); let _ = bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::ZKProof(unverifiable_100kb_batch_proof), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::ZKProof(unverifiable_100kb_batch_proof)) .await .unwrap(); @@ -1822,7 +1787,7 @@ impl TestCase for VerifyChunkedTxsInLightClient { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_verify_chunked_txs_in_light_client() -> Result<()> { TestCaseRunner::new(VerifyChunkedTxsInLightClient { task_manager: TaskManager::current(), @@ -1895,10 +1860,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + 
.send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -1909,10 +1873,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -1923,10 +1886,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment3.clone(), + )) .await .unwrap(); @@ -1937,10 +1899,9 @@ impl TestCase for UnchainedBatchProofsTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment4.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment4.clone(), + )) .await .unwrap(); @@ -2019,17 +1980,17 @@ impl TestCase for UnchainedBatchProofsTest { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp1), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp1)) .await .unwrap(); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp2), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp2)) .await .unwrap(); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp3), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp3)) .await .unwrap(); @@ -2057,7 +2018,7 @@ impl TestCase for UnchainedBatchProofsTest { assert_eq!(lcp_output.last_sequencer_commitment_index, U32::from(1)); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp4), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp4)) 
.await .unwrap(); @@ -2087,7 +2048,7 @@ impl TestCase for UnchainedBatchProofsTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_unchained_batch_proofs_in_light_client() -> Result<()> { TestCaseRunner::new(UnchainedBatchProofsTest { task_manager: TaskManager::current(), @@ -2159,10 +2120,9 @@ impl TestCase for UnknownL1HashBatchProofTest { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -2205,7 +2165,7 @@ impl TestCase for UnknownL1HashBatchProofTest { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2237,7 +2197,7 @@ impl TestCase for UnknownL1HashBatchProofTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_unknown_l1_hash_batch_proof_in_light_client() -> Result<()> { TestCaseRunner::new(UnknownL1HashBatchProofTest { task_manager: TaskManager::current(), @@ -2312,10 +2272,9 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -2326,10 +2285,9 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -2340,10 +2298,9 @@ impl TestCase for ChainProofByCommitmentIndex { }; let _ 
= sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment3.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment3.clone(), + )) .await .unwrap(); @@ -2396,7 +2353,7 @@ impl TestCase for ChainProofByCommitmentIndex { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2415,7 +2372,7 @@ impl TestCase for ChainProofByCommitmentIndex { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2453,7 +2410,7 @@ impl TestCase for ChainProofByCommitmentIndex { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_chain_proof_by_commitment_index() -> Result<()> { TestCaseRunner::new(ChainProofByCommitmentIndex { task_manager: TaskManager::current(), @@ -2564,7 +2521,7 @@ impl TestCase for ProofWithMissingCommitment { ); bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp)) .await .unwrap(); @@ -2602,7 +2559,7 @@ impl TestCase for ProofWithMissingCommitment { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_proof_with_missing_commitment_is_discarded() -> Result<()> { TestCaseRunner::new(ProofWithMissingCommitment { task_manager: TaskManager::current(), @@ -2688,10 +2645,9 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { }; let _ = malicious_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -2733,7 +2689,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { ); 
batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp1), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp1)) .await .unwrap(); @@ -2763,10 +2719,9 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send with the correct da service let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -2793,7 +2748,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { ); batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp1), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp1)) .await .unwrap(); @@ -2830,10 +2785,9 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send with the correct da service let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment2.clone(), + )) .await .unwrap(); @@ -2860,7 +2814,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { ); malicious_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp2.clone()), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp2.clone())) .await .unwrap(); @@ -2890,7 +2844,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { // Now send batch proof with the correct da pub key and expect it to transition batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(bp2.clone()), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(bp2.clone())) .await .unwrap(); @@ -2921,7 +2875,7 @@ impl TestCase for ProofAndCommitmentWithWrongDaPubkey { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn 
test_proof_and_commitment_with_wrong_da_pubkey() -> Result<()> { TestCaseRunner::new(ProofAndCommitmentWithWrongDaPubkey { task_manager: TaskManager::current(), @@ -3025,10 +2979,9 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment.clone(), + )) .await .unwrap(); @@ -3039,10 +2992,9 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { }; let _ = sequencer_bitcoin_da_service - .send_transaction_with_fee_rate( - DaTxRequest::SequencerCommitment(fake_sequencer_commitment_2.clone()), - 1.0, - ) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment( + fake_sequencer_commitment_2.clone(), + )) .await .unwrap(); @@ -3062,7 +3014,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { None, ); let _ = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(verifiable_batch_proof)) .await .unwrap(); @@ -3101,7 +3053,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { ), ); let _ = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(wrong_prev_hash_batch_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(wrong_prev_hash_batch_proof)) .await .unwrap(); @@ -3133,7 +3085,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { Some(fake_sequencer_commitment.serialize_and_calculate_sha_256()), ); let _ = batch_prover_bitcoin_da_service - .send_transaction_with_fee_rate(DaTxRequest::ZKProof(correct_prev_hash_proof), 1.0) + .send_transaction_and_wait(DaTxRequest::ZKProof(correct_prev_hash_proof)) .await .unwrap(); @@ -3160,7 +3112,7 @@ impl TestCase for ProofWithWrongPreviousCommitmentHash { } } -#[tokio::test] +#[tokio::test(flavor = 
"multi_thread")] async fn test_proof_with_wrong_previous_commitment_hash() -> Result<()> { TestCaseRunner::new(ProofWithWrongPreviousCommitmentHash { task_manager: TaskManager::current(), @@ -3387,6 +3339,8 @@ impl UndecompressableBlobTest { 2.0, bitcoin::Network::Regtest, REVEAL_TX_PREFIX, + vec![], + vec![], )? else { panic!("Wrong DaTxs kind"); diff --git a/bin/citrea/tests/bitcoin/mod.rs b/bin/citrea/tests/bitcoin/mod.rs index 1dd898a183..fc4c4988c0 100644 --- a/bin/citrea/tests/bitcoin/mod.rs +++ b/bin/citrea/tests/bitcoin/mod.rs @@ -15,6 +15,8 @@ pub mod bitcoin_service; pub mod bitcoin_test; pub mod bitcoin_verifier; #[cfg(feature = "testing")] +pub mod da_job; +#[cfg(feature = "testing")] pub mod da_queue; pub mod fork; #[cfg(feature = "testing")] diff --git a/bin/citrea/tests/bitcoin/sequencer_commitments.rs b/bin/citrea/tests/bitcoin/sequencer_commitments.rs index 2f0b9ef3b1..c8c96f4344 100644 --- a/bin/citrea/tests/bitcoin/sequencer_commitments.rs +++ b/bin/citrea/tests/bitcoin/sequencer_commitments.rs @@ -20,8 +20,9 @@ use reth_tasks::TaskManager; use rs_merkle::algorithms::Sha256; use rs_merkle::MerkleTree; use sov_ledger_rpc::LedgerRpcClient; -use sov_rollup_interface::da::{BlobReaderTrait, DaTxRequest, DataOnDa, SequencerCommitment}; +use sov_rollup_interface::da::{BlobReaderTrait, DataOnDa, SequencerCommitment}; use sov_rollup_interface::rpc::SequencerCommitmentResponse; +use sov_rollup_interface::services::da::DaTxRequest; use tokio::time::sleep; use super::get_citrea_path; @@ -370,7 +371,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { index: 1, }; da_service - .send_transaction_with_fee_rate(DaTxRequest::SequencerCommitment(commitment), 1.0) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .unwrap(); da.wait_mempool_len(2, None).await?; @@ -383,7 +384,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { index: 2, }; da_service - 
.send_transaction_with_fee_rate(DaTxRequest::SequencerCommitment(commitment), 1.0) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .unwrap(); // Restart sequencer, it should fetch commitment with index 1 and 2 @@ -448,7 +449,7 @@ impl TestCase for SequencerCommitmentsFromDaTest { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn test_sequencer_commitments_from_da_layer() -> Result<()> { TestCaseRunner::new(SequencerCommitmentsFromDaTest { task_manager: TaskManager::current(), diff --git a/bin/citrea/tests/bitcoin/utils.rs b/bin/citrea/tests/bitcoin/utils.rs index 3756c73fc4..8530fd04dc 100644 --- a/bin/citrea/tests/bitcoin/utils.rs +++ b/bin/citrea/tests/bitcoin/utils.rs @@ -8,31 +8,43 @@ use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use anyhow::bail; use bitcoin_da::fee::FeeService; +use bitcoin_da::job::rpc::create_rpc_module as create_da_job_rpc_module; use bitcoin_da::monitoring::{MonitoringConfig, MonitoringService}; use bitcoin_da::network_constants::get_network_constants; +use bitcoin_da::rpc::create_rpc_module as create_da_rpc_module; use bitcoin_da::service::{network_to_bitcoin_network, BitcoinService, BitcoinServiceConfig}; use bitcoin_da::spec::block::BitcoinBlock; use bitcoin_da::spec::RollupParams; use bitcoin_da::utxo_manager::UtxoSelectionMode; use bitcoincore_rpc::{Auth, Client, RpcApi}; use citrea_batch_prover::rpc::BatchProverRpcClient; +use citrea_common::rpc::server::start_rpc_server; +use citrea_common::RpcConfig; use citrea_e2e::bitcoin::BitcoinNode; use citrea_e2e::config::BitcoinConfig; use citrea_e2e::node::{BatchProver, FullNode, NodeKind}; use citrea_e2e::traits::NodeT; +use citrea_light_client_prover::circuit::initial_values::bitcoinda::NIGHTLY_INITIAL_BATCH_PROOF_METHOD_IDS; use citrea_light_client_prover::circuit::{ citrea_network_to_chain_id, SECURITY_COUNCIL_COMPRESSED_PUBKEY_SIZE, SECURITY_COUNCIL_MEMBER_COUNT, }; use 
citrea_primitives::{MAX_TX_BODY_SIZE, REVEAL_TX_PREFIX}; +use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; +use jsonrpsee::RpcModule; use reth_tasks::TaskExecutor; +use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, ReceiptClaim}; +use sov_db::ledger_db::LedgerDB; +use sov_db::rocks_db_config::RocksdbConfig; use sov_ledger_rpc::LedgerRpcClient; +use sov_modules_api::BatchProofCircuitOutputV3; use sov_rollup_interface::da::{ - BatchProofMethodId, BatchProofMethodIdBody, DaTxRequest, SequencerCommitment, + BatchProofMethodId, BatchProofMethodIdBody, SequencerCommitment, SECURITY_COUNCIL_SIGNATURE_SIZE, SECURITY_COUNCIL_SIGNATURE_THRESHOLD, }; use sov_rollup_interface::rpc::{JobRpcResponse, VerifiedBatchProofResponse}; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; +use sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use sov_rollup_interface::Network; use tokio::time::sleep; use uuid::Uuid; @@ -111,9 +123,12 @@ pub async fn spawn_bitcoin_da_sequencer_service( config: &BitcoinConfig, dir: PathBuf, ) -> Arc { + let mut sequencer_config = config.clone(); + sequencer_config.data_dir = sequencer_config.data_dir.join("sequencer"); + spawn_bitcoin_da_service( task_executor, - config, + &sequencer_config, dir, DaServiceKeyKind::Sequencer, REVEAL_TX_PREFIX.to_vec(), @@ -128,9 +143,12 @@ pub async fn spawn_bitcoin_da_prover_service( config: &BitcoinConfig, dir: PathBuf, ) -> Arc { + let mut prover_config = config.clone(); + prover_config.data_dir = prover_config.data_dir.join("prover"); + spawn_bitcoin_da_service( task_executor, - config, + &prover_config, dir, DaServiceKeyKind::BatchProver, REVEAL_TX_PREFIX.to_vec(), @@ -140,6 +158,60 @@ pub async fn spawn_bitcoin_da_prover_service( .await } +pub async fn spawn_bitcoin_da_prover_service_with_rpc_server( + task_executor: &TaskExecutor, + config: &BitcoinConfig, + dir: PathBuf, +) -> (Arc, 
HttpClient) { + let service = spawn_bitcoin_da_service( + task_executor, + config, + dir, + DaServiceKeyKind::BatchProver, + REVEAL_TX_PREFIX.to_vec(), + None, + None, + ) + .await; + + let rpc_config = RpcConfig { + bind_host: "127.0.0.1".into(), + bind_port: 0, + max_connections: 100, + max_request_body_size: 10 * 1024 * 1024, + max_response_body_size: 10 * 1024 * 1024, + batch_requests_limit: 50, + enable_subscriptions: true, + max_subscriptions_per_connection: 100, + trace_chain_block_limit: None, + proving_jobs_limit: 100, + timeout: 30, + enable_js_tracer: true, + api_key: None, + ..Default::default() + }; + + // Add da rpc and da job rpc methods + let mut rpc_methods = RpcModule::new(()); + let da_methods = create_da_rpc_module(service.clone()); + rpc_methods.merge(da_methods).unwrap(); + + let da_methods = create_da_job_rpc_module(service.clone()); + rpc_methods.merge(da_methods).unwrap(); + + let (port_tx, port_rx) = tokio::sync::oneshot::channel(); + start_rpc_server(rpc_config, task_executor, rpc_methods, Some(port_tx)); + + let addr = port_rx.await.unwrap(); + let http_host = format!("http://localhost:{}", addr.port()); + let http_client = HttpClientBuilder::default() + .request_timeout(Duration::from_secs(120)) + .build(http_host) + .unwrap(); + + (service, http_client) +} + #[cfg(feature = "testing")] pub async fn spawn_bitcoin_da_prover_service_with_utxo_selection_mode( task_executor: &TaskExecutor, @@ -161,7 +233,7 @@ pub async fn spawn_bitcoin_da_prover_service_with_utxo_selection_mode( pub async fn spawn_bitcoin_da_service( task_executor: &TaskExecutor, - da_config: &BitcoinConfig, + bitcoin_config: &BitcoinConfig, test_dir: PathBuf, kind: DaServiceKeyKind, reveal_tx_prefix: Vec, @@ -175,26 +247,29 @@ pub async fn spawn_bitcoin_da_service( }; let wallet = wallet.unwrap_or(NodeKind::Bitcoin.to_string()); let da_config = BitcoinServiceConfig { - node_url: format!("http://127.0.0.1:{}/wallet/{}", da_config.rpc_port, wallet), - node_username: 
da_config.rpc_user.clone(), - node_password: da_config.rpc_password.clone(), + node_url: format!( + "http://127.0.0.1:{}/wallet/{}", + bitcoin_config.rpc_port, wallet + ), + node_username: bitcoin_config.rpc_user.clone(), + node_password: bitcoin_config.rpc_password.clone(), da_private_key: Some(da_private_key), tx_backup_dir: test_dir.join("tx_backup_dir").display().to_string(), monitoring: Some(MonitoringConfig { check_interval: 1, history_limit: 1_000, max_history_size: 200_000_000, - max_rebroadcast_attempts: 5, + max_rebroadcast_attempts: 50, rebroadcast_delay: 1, }), mempool_space_url: None, utxo_selection_mode, rpc_timeout_secs: None, rpc_connect_timeout_secs: None, + max_fee_rate_sat_to_pay: None, + fee_rate_cap_duration_secs: None, }; - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - let network = Network::Nightly; let chain_params = RollupParams { reveal_tx_prefix, @@ -230,6 +305,10 @@ pub async fn spawn_bitcoin_da_service( let fee_service = FeeService::new(client.clone(), network, da_config.mempool_space_url.clone()); + let ledger_db_path = bitcoin_config.data_dir.join("da_ledger_db"); + let rocksdb_config = RocksdbConfig::new(&ledger_db_path, None, None); + let ledger_db = LedgerDB::with_config(&rocksdb_config).unwrap(); + let service = Arc::new( BitcoinService::from_config( &da_config, @@ -240,14 +319,14 @@ pub async fn spawn_bitcoin_da_service( monitoring_service, fee_service, true, - tx, + ledger_db, ) .await .unwrap(), ); task_executor - .spawn_with_graceful_shutdown_signal(|tk| service.clone().run_da_queue(rx, block_rx, tk)); + .spawn_with_graceful_shutdown_signal(|tk| service.clone().run_da_queue(block_rx, tk)); service.monitoring.restore().await.unwrap(); task_executor.spawn_with_graceful_shutdown_signal(|tk| Arc::clone(&service.monitoring).run(tk)); @@ -441,9 +520,13 @@ pub async fn generate_mock_txs( let prefix_str = "wrong_prefix"; let wrong_prefix_wallet = PathBuf::from_str(prefix_str).unwrap(); 
create_and_fund_wallet(prefix_str.to_string(), da_node).await; + + let mut first_config = da_node.config.clone(); + first_config.data_dir = first_config.data_dir.join("1"); + let wrong_prefix_da_service = spawn_bitcoin_da_service( task_executor, - &da_node.config, + &first_config, wrong_prefix_wallet, DaServiceKeyKind::Sequencer, vec![6], @@ -455,9 +538,13 @@ pub async fn generate_mock_txs( let wrong_key_str = "wrong_key"; let wrong_key_wallet = PathBuf::from_str(wrong_key_str).unwrap(); create_and_fund_wallet(wrong_key_str.to_string(), da_node).await; + + let mut second_config = da_node.config.clone(); + second_config.data_dir = second_config.data_dir.join("2"); + let wrong_key_da_service = spawn_bitcoin_da_service( task_executor, - &da_node.config, + &second_config, wrong_key_wallet, DaServiceKeyKind::Other( "E9873D79C6D87DC0FB6A5778633389F4453213303DA61F20BD67FC233AA33263".to_string(), @@ -498,8 +585,9 @@ pub async fn generate_mock_txs( signatures_with_index, }; valid_method_ids.push(method_id.clone()); + da_service - .send_transaction(DaTxRequest::BatchProofMethodId(method_id)) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(method_id)) .await .expect("Failed to send transaction"); @@ -511,7 +599,7 @@ pub async fn generate_mock_txs( seq_index += 1; valid_commitments.push(commitment.clone()); da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .expect("Failed to send transaction"); @@ -523,7 +611,7 @@ pub async fn generate_mock_txs( seq_index += 1; valid_commitments.push(commitment.clone()); da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .expect("Failed to send transaction"); @@ -532,7 +620,7 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + 
.send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); @@ -542,13 +630,13 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + .send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); // Sequencer commitment with wrong tx prefix wrong_prefix_da_service - .send_transaction(DaTxRequest::SequencerCommitment(SequencerCommitment { + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(SequencerCommitment { merkle_root: [15; 32], index: seq_index, l2_end_block_number: 1268, @@ -561,13 +649,13 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + .send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); // Sequencer commitment with wrong key and signature wrong_key_da_service - .send_transaction(DaTxRequest::SequencerCommitment(SequencerCommitment { + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(SequencerCommitment { merkle_root: [15; 32], index: seq_index, l2_end_block_number: 1268, @@ -582,7 +670,7 @@ pub async fn generate_mock_txs( }; valid_commitments.push(commitment.clone()); da_service - .send_transaction(DaTxRequest::SequencerCommitment(commitment)) + .send_transaction_and_wait(DaTxRequest::SequencerCommitment(commitment)) .await .expect("Failed to send transaction"); @@ -592,7 +680,7 @@ pub async fn generate_mock_txs( valid_proofs.push(blob.clone()); da_service - .send_transaction(DaTxRequest::ZKProof(blob)) + .send_transaction_and_wait(DaTxRequest::ZKProof(blob)) .await .expect("Failed to send transaction"); @@ -619,7 +707,7 @@ pub async fn generate_mock_txs( }; valid_method_ids.push(method_id.clone()); da_service - .send_transaction(DaTxRequest::BatchProofMethodId(method_id)) + .send_transaction_and_wait(DaTxRequest::BatchProofMethodId(method_id)) .await 
.expect("Failed to send transaction"); @@ -679,3 +767,58 @@ pub mod macros { pub(crate) use assert_panic; } + +#[allow(clippy::too_many_arguments)] +pub fn create_serialized_fake_receipt_batch_proof_and_serialized_output( + initial_state_root: [u8; 32], + last_l2_height: u64, + state_diff: Option, + malformed_journal: bool, + last_l1_hash_on_bitcoin_light_client_contract: [u8; 32], + sequencer_commitments: Vec, + state_roots_of_seq_comms: Vec<[u8; 32]>, + prev_sequencer_commitment_hash: Option<[u8; 32]>, +) -> (Vec, Vec) { + let method_id = NIGHTLY_INITIAL_BATCH_PROOF_METHOD_IDS.inner()[0].1; + let sequencer_commitment_hashes = sequencer_commitments + .iter() + .map(|c| c.serialize_and_calculate_sha_256()) + .collect::>(); + let previous_commitment_index = if sequencer_commitments[0].index == 1 { + None + } else { + Some(sequencer_commitments[0].index - 1) + }; + let mut state_roots = vec![initial_state_root]; + + // For the sake of easiness of impl tests, we can use merkle root as state root + state_roots.extend(state_roots_of_seq_comms); + + let output_v3 = BatchProofCircuitOutputV3 { + state_roots, + last_l2_height, + final_l2_block_hash: [0u8; 32], + state_diff: state_diff.unwrap_or_default(), + sequencer_commitment_hashes, + last_l1_hash_on_bitcoin_light_client_contract, + sequencer_commitment_index_range: ( + sequencer_commitments[0].index, + sequencer_commitments[sequencer_commitments.len() - 1].index, + ), + previous_commitment_index, + previous_commitment_hash: prev_sequencer_commitment_hash, + }; + let batch_proof_output = BatchProofCircuitOutput::V3(output_v3); + let mut output_serialized = borsh::to_vec(&batch_proof_output).unwrap(); + + // Distorts the output and make it unparsable + if malformed_journal { + output_serialized.push(1u8); + } + + let claim = MaybePruned::Value(ReceiptClaim::ok(method_id, output_serialized.clone())); + let fake_receipt = FakeReceipt::new(claim); + // Receipt with verifiable claim + let receipt = 
InnerReceipt::Fake(fake_receipt); + (bincode::serialize(&receipt).unwrap(), output_serialized) +} diff --git a/bin/citrea/tests/common/helpers.rs b/bin/citrea/tests/common/helpers.rs index 356e579e19..d4e4dd2611 100644 --- a/bin/citrea/tests/common/helpers.rs +++ b/bin/citrea/tests/common/helpers.rs @@ -145,6 +145,7 @@ pub async fn start_rollup( &rollup_config, sequencer_config.is_some() || rollup_prover_config.is_some(), network.unwrap_or(Network::Nightly), + ledger_db.clone(), ) .await .expect("Dependencies setup should work"); diff --git a/crates/batch-prover/Cargo.toml b/crates/batch-prover/Cargo.toml index cc57e58962..bb728aa32e 100644 --- a/crates/batch-prover/Cargo.toml +++ b/crates/batch-prover/Cargo.toml @@ -63,3 +63,6 @@ tempfile = { workspace = true } [lints] workspace = true + +[features] +testing = [] diff --git a/crates/batch-prover/src/prover.rs b/crates/batch-prover/src/prover.rs index 0c57eadfda..8bfd574782 100644 --- a/crates/batch-prover/src/prover.rs +++ b/crates/batch-prover/src/prover.rs @@ -706,7 +706,7 @@ where // start watching the proving jobs to finish in the background tokio::spawn(async move { - while let Some((job_id, rx)) = proving_jobs.recv().await { + while let Some((proving_job_id, rx)) = proving_jobs.recv().await { let ProofWithDuration { proof, duration, @@ -714,15 +714,19 @@ where } = rx.await.expect("Proof channel should never close"); info!( "Proving job finished {}, took {:?} seconds", - job_id, duration + proving_job_id, duration ); - let output = - extract_proof_output::(&job_id, &proof, &code_commitments_by_spec, network); + let output = extract_proof_output::( + &proving_job_id, + &proof, + &code_commitments_by_spec, + network, + ); // stores proof and marks job as waiting for da ledger_db - .put_proof_by_job_id(job_id, proof.clone(), output.into(), info) + .put_proof_by_job_id(proving_job_id, proof.clone(), output.into(), info) .expect("Should put proof to db"); // Record the proving time metric @@ -733,16 +737,26 @@ 
where // submit the proof to the DA service in the background tokio::spawn(async move { - let tx_id = prover_service - .submit_proof(proof, job_id) + let (da_job_id, rx) = prover_service + .submit_proof_by_id(proving_job_id) .await .expect("Failed to submit proof"); - info!("Job {} proof sent to DA", job_id); + info!("Job {proving_job_id} proof submitted to DA. Da job id {da_job_id}"); + + ledger_db + .set_da_job_id_by_prover_job_id(proving_job_id, da_job_id) + .expect("Failed to save da job by id"); + + // Todo handle da job sending failure + let txid = rx + .await + .expect("Da job channel should never close") + .unwrap(); // stores tx id and removes job from pending da submission ledger_db - .finalize_proving_job(job_id, tx_id.into()) + .finalize_proving_job(proving_job_id, txid.into()) .expect("Should update proving job tx id"); }); } @@ -808,6 +822,7 @@ where .ledger_db .get_pending_l1_submission_jobs() .expect("Should get pending l1 jobs"); + for job_id in job_ids { if let hash_map::Entry::Vacant(entry) = proofs.entry(job_id) { let stored_proof = self @@ -824,23 +839,48 @@ where } // submit all proofs to da - for (job_id, proof) in proofs { + for (proving_job_id, _) in proofs { let prover_service = self.prover_service.clone(); let ledger_db = self.ledger_db.clone(); - info!("Submitting recovered proof for job {}", job_id); + info!("Submitting recovered proof for job {}", proving_job_id); + + // Recovery on-going in progress proof on DA + let rx = if let Some(da_job_id) = ledger_db + .get_da_job_id_by_prover_job_id(proving_job_id) + .expect("DB call shouldn't fail") + { + info!( + "DA job {} already exists for proving job {}", + da_job_id, proving_job_id + ); + prover_service + .get_existing_da_job_waiter(da_job_id) + .await + .expect("Should recover da job receiver") + } else { + // No on going da job, submit a new one + let (da_job_id, rx) = prover_service + .submit_proof_by_id(proving_job_id) + .await + .expect("Failed to submit proof"); + + ledger_db + 
.set_da_job_id_by_prover_job_id(proving_job_id, da_job_id) + .expect("Failed to set da job_id"); + info!("Recovered Job {} proof sent to DA", proving_job_id); + rx + }; + // submit in the background tokio::spawn(async move { - let tx_id = prover_service - .submit_proof(proof, job_id) - .await - .expect("Failed to submit transaction"); - info!("Recovered Job {} proof sent to DA", job_id); + // TODO handle failure + let txid = rx.await.unwrap().expect("Failed to submit transaction"); // stores tx id and removes job from pending da submission ledger_db - .finalize_proving_job(job_id, tx_id.into()) + .finalize_proving_job(proving_job_id, txid.into()) .expect("Should update proving job tx id"); - info!("Finalized recovered proving job: {}", job_id); + info!("Finalized recovered proving job: {}", proving_job_id); }); } } @@ -1237,7 +1277,7 @@ fn get_prev_hash_proof( /// /// # Returns /// A `BatchProofCircuitOutput` that contains the extracted output from the proof. -fn extract_proof_output( +pub(crate) fn extract_proof_output( job_id: &Uuid, proof: &Proof, code_commitments_by_spec: &HashMap, diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index fb4614d678..7e71dbf80e 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -30,12 +30,12 @@ use sov_db::schema::types::job_status::JobStatus; use sov_db::schema::types::{L2BlockNumber, SlotNumber}; use sov_modules_api::{BatchProofCircuitOutputV3, SpecId, Zkvm}; use sov_prover_storage_manager::ProverStorageManager; -use sov_rollup_interface::da::{DaTxRequest, SequencerCommitment}; +use sov_rollup_interface::da::SequencerCommitment; use sov_rollup_interface::rpc::{ BatchProofOutputRpcResponse, BatchProofResponse, JobRpcResponse, SequencerCommitmentResponse, SequencerCommitmentRpcParam, }; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use 
sov_rollup_interface::zk::batch_proof::output::{BatchProofCircuitOutput, CumulativeStateDiff}; use sov_rollup_interface::zk::ProvingSessionInfo; use tokio::sync::{mpsc, oneshot}; @@ -241,6 +241,16 @@ pub trait BatchProverRpc { with_proof: Option, ) -> RpcResult>; + /// Get da job id by job id. + /// + /// # Arguments + /// * `job_id` - The unique identifier of the proving job to retrieve. + /// + /// # Returns + /// An optional `Uuid` for the associated da job. + #[method(name = "getDaJobIdByJobId")] + async fn get_da_job_id_by_job_id(&self, job_id: Uuid) -> RpcResult>; + /// Gets last `count` number of job ids. Returns ids in descending order, so latest job is the first index. /// /// # Arguments @@ -305,6 +315,17 @@ pub trait BatchProverRpc { /// A new `Uuid` representing the retried proving job. #[method(name = "retryProvingJob")] async fn retry_proving_job(&self, job_id: Uuid) -> RpcResult; + + /// Submit a proof with output. Only available with `testing` feature. + /// + /// # Arguments + /// * `proof` - Serialized proof + /// * `output` - Serialized `BatchProofCircuitOutput` + /// + /// # Returns + /// The bitcoin-da job id + #[method(name = "submitProofFromFile")] + async fn submit_proof_with_output(&self, proof: Vec, output: Vec) -> RpcResult; } /// Server implementation of the Batch Prover RPC interface @@ -517,15 +538,20 @@ where let receipt = InnerReceipt::Fake(fake_receipt); let proof = bincode::serialize(&receipt).expect("Receipt serialization cannot fail"); - let tx_id = self + let (_, rx) = self .context .da_service .send_transaction(DaTxRequest::ZKProof(proof.clone())) .await .map_err(internal_rpc_error)?; + let txid = rx + .await + .map_err(internal_rpc_error)? 
+ .map_err(internal_rpc_error)?; + Ok(BatchProofResponse { - l1_tx_id: Some(tx_id.into()), + l1_tx_id: Some(txid.into()), proof, proof_output: StoredBatchProofOutput::from(output).into(), info: None, @@ -741,15 +767,70 @@ where info!("Retried proving job {}, new job id: {}", job_id, new_id); Ok(new_id) } + + async fn get_da_job_id_by_job_id(&self, job_id: Uuid) -> RpcResult> { + self.context + .ledger_db + .get_da_job_id_by_prover_job_id(job_id) + .map_err(internal_rpc_error) + } + + #[cfg(not(feature = "testing"))] + async fn submit_proof_with_output(&self, _proof: Vec, _output: Vec) -> RpcResult { + Err(internal_rpc_error("Unsupported test method")) + } + + #[cfg(feature = "testing")] + async fn submit_proof_with_output(&self, proof: Vec, output: Vec) -> RpcResult { + use sov_rollup_interface::services::da::DaTxRequest; + + let ledger_db = &self.context.ledger_db; + let proving_job_id = Uuid::now_v7(); + info!("Submitting proof with id {proving_job_id}"); + + let output: BatchProofCircuitOutput = borsh::from_slice(&output).unwrap(); + + let commitment_indices = (output.sequencer_commitment_index_range().0 + ..output.sequencer_commitment_index_range().1) + .collect(); + + ledger_db + .insert_new_proving_job(proving_job_id, &commitment_indices) + .expect("Should insert new proving job"); + + ledger_db + .put_proof_by_job_id( + proving_job_id, + proof.clone(), + output.into(), + ProvingSessionInfo::Local(Default::default()), + ) + .expect("Should put proof to db"); + + let (da_job_id, _) = self + .context + .da_service + .send_transaction(DaTxRequest::ZKProof(proof.clone())) + .await + .map_err(internal_rpc_error)?; + + ledger_db + .set_da_job_id_by_prover_job_id(proving_job_id, da_job_id) + .expect("Failed to save da job by id"); + + info!("Submitted proof from file, da job id: {da_job_id}"); + + Ok(da_job_id) + } } -/// Creates an RPC module with fullnode methods +/// Creates an RPC module with batch-prover methods /// /// # Arguments /// * `rpc_context` - Context 
containing shared data for RPC methods /// /// # Type Parameters -/// * `DB` - Database type implementing NodeLedgerOps +/// * `DB` - Database type implementing BatchProverLedgerOps /// * `Da` - Data availability service type implementing DaService /// * `Vm` - Virtual machine type implementing Zkvm pub fn create_rpc_module( diff --git a/crates/bitcoin-da/Cargo.toml b/crates/bitcoin-da/Cargo.toml index 06938ffb67..064ed5a9b1 100644 --- a/crates/bitcoin-da/Cargo.toml +++ b/crates/bitcoin-da/Cargo.toml @@ -13,11 +13,13 @@ repository = { workspace = true } [dependencies] citrea-common = { path = "../common", optional = true } citrea-primitives = { path = "../primitives" } +sov-db = { path = "../sovereign-sdk/full-node/db/sov-db", optional = true } sov-rollup-interface = { path = "../sovereign-sdk/rollup-interface" } anyhow = { workspace = true } async-trait = { workspace = true, optional = true } backoff = { workspace = true, optional = true } +bincode = { workspace = true, optional = true } bitcoin = { workspace = true } borsh = { workspace = true } crypto-bigint = { workspace = true } @@ -29,6 +31,7 @@ k256 = { workspace = true } lru = { workspace = true, optional = true } metrics = { workspace = true, optional = true } metrics-derive = { workspace = true, optional = true } +parking_lot = { workspace = true, optional = true } reqwest = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } secp256k1 = { version = "0.29", optional = true, features = ["rand-std", "std", "global-context"] } @@ -38,6 +41,7 @@ sha2 = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"], optional = true } tracing = { workspace = true, optional = true } +uuid = { workspace = true, optional = true } bitcoincore-rpc = { workspace = true, optional = true } @@ -49,20 +53,24 @@ default = [] native = [ "dep:async-trait", "dep:backoff", + "dep:bincode", + "dep:bitcoincore-rpc", + "dep:citrea-common", "dep:futures", 
+ "dep:jsonrpsee", "dep:lru", - "dep:tokio", "dep:metrics", "dep:metrics-derive", + "dep:parking_lot", "dep:reth-tasks", - "dep:tracing", - "dep:serde_json", - "sov-rollup-interface/native", - "dep:citrea-common", - "dep:bitcoincore-rpc", "dep:reqwest", - "dep:jsonrpsee", "dep:secp256k1", + "dep:serde_json", + "dep:sov-db", + "dep:tokio", + "dep:tracing", + "dep:uuid", + "sov-rollup-interface/native", ] testing = [] diff --git a/crates/bitcoin-da/src/error.rs b/crates/bitcoin-da/src/error.rs index e084ae596a..822c5b5c6d 100644 --- a/crates/bitcoin-da/src/error.rs +++ b/crates/bitcoin-da/src/error.rs @@ -6,6 +6,7 @@ use thiserror::Error; use tokio::task::JoinError; use crate::fee::FeeServiceError; +use crate::job::error::JobServiceError; use crate::monitoring::{MonitorError, TxStatus}; /// The top level error type that can be returned by the `BitcoinService`. @@ -44,9 +45,9 @@ pub enum BitcoinServiceError { /// Cannot bump fee for TX. #[error("Cannot bump fee for TX with status: {0:?}. Transaction must be pending")] WrongStatusForBumping(TxStatus), - /// Tx requested when queue is not empty. - #[error("Cannot create DA transaction while da queue is not empty")] - QueueNotEmpty, + /// Tx request when previous job is not fully sent. + #[error("Cannot create DA transaction while other job is in progress")] + PreviousJobInProgress, /// Transaction rejected by mempool. #[error(transparent)] MempoolRejection(#[from] MempoolRejection), @@ -107,9 +108,26 @@ pub enum BitcoinServiceError { /// Body builders error. 
#[error("Body builders error: {0}")] TransactionBuilderError(String), + /// Fee cap exceeded + #[error("Fee cap exceeded: current rate {current_rate} sat/vb > max {max_rate} sat/vb (elapsed: {elapsed_secs}s / max: {max_duration_secs}s)")] + FeeCapExceeded { + /// Current fee rate as sat/vb + current_rate: f64, + /// Max fee rate in sat/vb + max_rate: f64, + /// Duration since the transaction has been blocked by max fee rate cap + elapsed_secs: u64, + /// Max duration before sending transaction above max fee rate + max_duration_secs: u64, + }, /// Fee service operation failure. #[error("Fee service error: {0}")] FeeServiceError(#[from] FeeServiceError), + // #[error(transparent)] + // Other(#[from] anyhow::Error), + /// Job service error + #[error("Job service error: {0}")] + JobService(#[from] JobServiceError), } /// Error type for mempool rejections via testmempoolaccept method. diff --git a/crates/bitcoin-da/src/fee.rs b/crates/bitcoin-da/src/fee.rs index 2931de25b3..7d3ee1cc70 100644 --- a/crates/bitcoin-da/src/fee.rs +++ b/crates/bitcoin-da/src/fee.rs @@ -5,7 +5,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use bitcoin::{Amount, Network, Sequence, Txid}; +use bitcoin::{Amount, Network, Sequence, Transaction, Txid}; use bitcoincore_rpc::json::{ BumpFeeResult, CreateRawTransactionInput, EstimateMode, WalletCreateFundedPsbtOptions, }; @@ -100,6 +100,7 @@ impl FeeService { ) -> Self { let mempool_space_url = mempool_space_url.unwrap_or_else(|| DEFAULT_MEMPOOL_SPACE_URL.to_string()); + Self { client, network, @@ -144,7 +145,7 @@ impl FeeService { let sat_vkb = smart_fee.unwrap_or(1000.0); let sat_vb = sat_vkb / 1000.0; - tracing::debug!("Fee rate: {} sat/vb", sat_vb); + tracing::debug!("Fee rate: {sat_vb} sat/vb"); Ok(sat_vb) } @@ -230,6 +231,84 @@ impl FeeService { pub fn get_next_fee_rate_multiplier(&self, multiplier: f64) -> f64 { (multiplier * FEE_RATE_MULTIPLIER_FACTOR).min(MAX_FEE_RATE_MULTIPLIER) } + + pub(crate) async fn 
validate_txs_fee_rate( + &self, + txs: &[SignedTxPair], + sent_commits: &[Transaction], + sent_reveals: &[Transaction], + fee_rate: f64, + utxo_context: UtxoContext, + ) -> std::result::Result<(), BitcoinServiceError> { + let UtxoContext { + available_utxos: utxos, + prev_utxo, + } = utxo_context; + + let mut utxo_map = utxos + .into_iter() + .map(|utxo| ((utxo.tx_id, utxo.vout), Amount::from_sat(utxo.amount))) + .collect::>(); + if let Some(prev_utxo) = prev_utxo { + utxo_map.insert( + (prev_utxo.tx_id, prev_utxo.vout), + Amount::from_sat(prev_utxo.amount), + ); + } + + // Add sent chunks as available inputs + let get_tx_outputs = |txs: &[Transaction]| { + txs.iter() + .flat_map(|tx| { + let txid = tx.compute_txid(); + tx.output + .iter() + .enumerate() + .map(move |(idx, out)| ((txid, idx as u32), out.value)) + }) + .collect::>() + }; + utxo_map.extend(get_tx_outputs(sent_commits)); + utxo_map.extend(get_tx_outputs(sent_reveals)); + + for tx in txs { + // Validate commit + let commit_tx = &tx.commit.tx; + let input_amount: Amount = commit_tx + .input + .iter() + .flat_map(|input| { + utxo_map + .get(&(input.previous_output.txid, input.previous_output.vout)) + .cloned() + }) + .sum(); + let output_amount = commit_tx.output.iter().map(|tx| tx.value).sum(); + + if (input_amount - output_amount) < Amount::from_sat(commit_tx.vsize() as u64) { + return Err(BitcoinServiceError::FeeCalculation(fee_rate)); + } + + // Add commit change output to utxo_map + if let Some(change_output) = commit_tx.output.get(1) { + utxo_map.insert((tx.commit_txid(), 1), change_output.value); + } + + // Validate reveal + let reveal_tx = &tx.reveal.tx; + let input_amount = commit_tx.output[0].value; + let output_amount = reveal_tx.output[0].value; + + // Add reveal utxo to utxo_map, used by chunking txs + utxo_map.insert((tx.reveal_txid(), 0), output_amount); + + if (input_amount - output_amount) < Amount::from_sat(reveal_tx.vsize() as u64) { + return 
Err(BitcoinServiceError::FeeCalculation(fee_rate)); + } + } + + Ok(()) + } } pub(crate) async fn get_fee_rate_from_mempool_space( @@ -269,62 +348,6 @@ pub(crate) async fn get_fee_rate_from_mempool_space( Ok(Some(fee_rate * 1000.0)) } -pub(crate) fn validate_txs_fee_rate( - txs: &[SignedTxPair], - fee_rate: f64, - utxo_context: UtxoContext, -) -> std::result::Result<(), BitcoinServiceError> { - let mut utxo_map = utxo_context - .available_utxos - .into_iter() - .map(|utxo| ((utxo.tx_id, utxo.vout), Amount::from_sat(utxo.amount))) - .collect::>(); - if let Some(prev_utxo) = utxo_context.prev_utxo { - utxo_map.insert( - (prev_utxo.tx_id, prev_utxo.vout), - Amount::from_sat(prev_utxo.amount), - ); - } - - for tx in txs { - // Validate commit - let commit_tx = &tx.commit.tx; - let input_amount: Amount = commit_tx - .input - .iter() - .flat_map(|input| { - utxo_map - .get(&(input.previous_output.txid, input.previous_output.vout)) - .cloned() - }) - .sum(); - let output_amount = commit_tx.output.iter().map(|tx| tx.value).sum(); - - if (input_amount - output_amount) < Amount::from_sat(commit_tx.vsize() as u64) { - return Err(BitcoinServiceError::FeeCalculation(fee_rate)); - } - - // Add commit change output to utxo_map - if let Some(change_output) = commit_tx.output.get(1) { - utxo_map.insert((tx.commit_txid(), 1), change_output.value); - } - - // Validate reveal - let reveal_tx = &tx.reveal.tx; - let input_amount = commit_tx.output[0].value; - let output_amount = reveal_tx.output[0].value; - - // Add reveal utxo to utxo_map, used by chunking txs - utxo_map.insert((tx.reveal_txid(), 0), output_amount); - - if (input_amount - output_amount) < Amount::from_sat(reveal_tx.vsize() as u64) { - return Err(BitcoinServiceError::FeeCalculation(fee_rate)); - } - } - - Ok(()) -} - async fn get_with_timeout( url: T, timeout: Duration, diff --git a/crates/bitcoin-da/src/helpers/builders/body_builders.rs b/crates/bitcoin-da/src/helpers/builders/body_builders.rs index 
bc174df452..606c028686 100644 --- a/crates/bitcoin-da/src/helpers/builders/body_builders.rs +++ b/crates/bitcoin-da/src/helpers/builders/body_builders.rs @@ -14,7 +14,7 @@ use bitcoin::secp256k1::{SecretKey, XOnlyPublicKey}; use bitcoin::{Address, Amount, Network, Transaction}; use metrics::histogram; use secp256k1::SECP256K1; -use serde::Serialize; +use serde::{Deserialize, Serialize}; use sov_rollup_interface::da::DataOnDa; use tracing::{info, instrument, trace, warn}; @@ -26,8 +26,9 @@ use crate::spec::utxo::UTXO; use crate::utxo_manager::UtxoContext; use crate::{REVEAL_OUTPUT_AMOUNT, REVEAL_OUTPUT_THRESHOLD}; +#[derive(Debug, Clone, Serialize, Deserialize, borsh::BorshSerialize, borsh::BorshDeserialize)] /// These are real blobs we put on DA. -pub(crate) enum RawTxData { +pub enum RawTxData { /// borsh(DataOnDa::Complete(compress(Proof))) Complete(Vec), /// let compressed = compress(borsh(Proof)) @@ -77,6 +78,19 @@ pub enum DaTxs { }, } +impl DaTxs { + /// Number of commit/reveal pair + pub fn count(&self) -> usize { + match self { + // Number of required chunks + 1 for aggregate + DaTxs::Chunked { commit_chunks, .. } => commit_chunks.len() + 1, + DaTxs::Complete { .. } + | DaTxs::BatchProofMethodId { .. } + | DaTxs::SequencerCommitment { .. } => 1, + } + } +} + /// Creates the light client transactions (commit and reveal). /// Based on data type, the number of transactions may vary. 
/// In the end, reveal txs will be mined with a nonce to have @@ -85,6 +99,8 @@ pub enum DaTxs { #[instrument(level = "trace", skip_all, err)] pub fn create_inscription_transactions( data: RawTxData, + sent_commits: Vec, + sent_reveals: Vec, da_private_key: SecretKey, utxo_context: UtxoContext, change_address: Address, @@ -104,8 +120,8 @@ pub fn create_inscription_transactions( network, &reveal_tx_prefix, ), - RawTxData::Chunks(body) => create_inscription_type_1( - body, + RawTxData::Chunks(data) => create_inscription_type_1( + data, &da_private_key, utxo_context, change_address, @@ -113,6 +129,8 @@ pub fn create_inscription_transactions( reveal_fee_rate, network, &reveal_tx_prefix, + sent_commits, + sent_reveals, ), RawTxData::BatchProofMethodId(body) => create_inscription_type_3( body, @@ -326,6 +344,8 @@ pub fn create_inscription_type_1( reveal_fee_rate: f64, network: Network, reveal_tx_prefix: &[u8], + sent_commits: Vec, + sent_reveals: Vec, ) -> Result { let UtxoContext { available_utxos: mut utxos, @@ -336,12 +356,26 @@ pub fn create_inscription_type_1( let key_pair = UntweakedKeypair::from_secret_key(SECP256K1, da_private_key); let (public_key, _parity) = XOnlyPublicKey::from_keypair(&key_pair); - let mut commit_chunks: Vec = vec![]; - let mut reveal_chunks: Vec = vec![]; + let current_idx = sent_commits.len(); + let mut commit_chunks = sent_commits; + let mut reveal_chunks = sent_reveals; + + if let Some(reveal_tx) = reveal_chunks.last() { + prev_utxo = Some(UTXO { + tx_id: reveal_tx.compute_txid(), + vout: 0, + script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), + address: None, + amount: reveal_tx.output[0].value.to_sat(), + confirmations: 0, + spendable: true, + solvable: true, + }); + } let start = Instant::now(); - for body in chunks { + for body in chunks.into_iter().skip(current_idx) { let kind = TransactionKind::Chunks; let kind_bytes = kind.to_bytes(); @@ -654,6 +688,7 @@ pub fn create_inscription_type_1( if let Some(root) = 
merkle_root { info!("Taproot merkle root for inscription - Aggregate: {}", root); } + return Ok(DaTxs::Chunked { commit_chunks, reveal_chunks, diff --git a/crates/bitcoin-da/src/helpers/builders/mod.rs b/crates/bitcoin-da/src/helpers/builders/mod.rs index 1b9fb909d2..e2fcf27b10 100644 --- a/crates/bitcoin-da/src/helpers/builders/mod.rs +++ b/crates/bitcoin-da/src/helpers/builders/mod.rs @@ -2,6 +2,7 @@ //! related to commit-reveal pattern for Citrea rollup. pub mod body_builders; + #[cfg(feature = "testing")] pub mod test_utils; diff --git a/crates/bitcoin-da/src/helpers/builders/tests.rs b/crates/bitcoin-da/src/helpers/builders/tests.rs index 247e6b7d82..306cee436d 100644 --- a/crates/bitcoin-da/src/helpers/builders/tests.rs +++ b/crates/bitcoin-da/src/helpers/builders/tests.rs @@ -510,6 +510,8 @@ fn create_inscription_transactions() { let tx_prefix = &[0u8]; let DaTxs::Complete { commit, reveal } = super::body_builders::create_inscription_transactions( RawTxData::Complete(body.clone()), + vec![], + vec![], da_private_key, UtxoContext { prev_utxo: None, diff --git a/crates/bitcoin-da/src/helpers/mod.rs b/crates/bitcoin-da/src/helpers/mod.rs index b2a4d2bee8..61a97ed007 100644 --- a/crates/bitcoin-da/src/helpers/mod.rs +++ b/crates/bitcoin-da/src/helpers/mod.rs @@ -2,9 +2,12 @@ //! It includes transaction kind definitions, transaction builders, parsers, and Merkle tree utilities. 
use core::num::NonZero; +#[cfg(feature = "native")] +use std::time::{SystemTime, UNIX_EPOCH}; use bitcoin::consensus::Encodable; use bitcoin::Transaction; +use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; #[cfg(feature = "native")] @@ -16,7 +19,7 @@ pub mod merkle_tree; pub mod parsers; /// Type represents a typed enum for transaction kind -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] #[repr(u16)] pub(crate) enum TransactionKind { /// This type of transaction includes full body (< 400kb) @@ -68,6 +71,15 @@ impl TransactionKind { } } +/// Return UNIX timestamp in seconds +#[cfg(feature = "native")] +pub(crate) fn get_timestamp() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Cannot fail because there is always a UNIX epoch") + .as_secs() +} + /// Calculate SHA-256d with the patched sha256 impl. pub fn calculate_double_sha256(input: &[u8]) -> [u8; 32] { let mut hasher = Sha256::default(); diff --git a/crates/bitcoin-da/src/job/error.rs b/crates/bitcoin-da/src/job/error.rs new file mode 100644 index 0000000000..3dcc3d6803 --- /dev/null +++ b/crates/bitcoin-da/src/job/error.rs @@ -0,0 +1,42 @@ +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId}; +use thiserror::Error; + +/// Job errors +#[derive(Error, Debug)] +pub enum JobServiceError { + /// Job was not found + #[error("Job not found: {0}")] + JobNotFound(JobId), + + /// Job exceeded the timeout duration + #[error("Job {0} timed out after {1} seconds")] + JobTimeout(JobId, u64), + + /// Job completed in a corrupted state without transactions. 
+ #[error("Job {0} completed but no transactions found")] + NoTransactionsFound(JobId), + + /// Failed to serialize or deserialize job data + #[error("Job borsh serialization failed: {0}")] + SerializationError(#[from] std::io::Error), + + /// Database operation failed + #[error("Database error: {0}")] + DatabaseError(#[from] anyhow::Error), + + /// Job execution failed + #[error("Job {0} failed: {1}")] + JobFailed(JobId, String), + + /// Job was cancelled before completion + #[error("Job {0} was cancelled")] + JobCancelled(JobId), + + /// Job cancellation failure + #[error("Job {0} cannot be cancelled as it is in status: {1:?}")] + JobCancellationFailure(JobId, DaJobStatus), + + /// Job retry failure + #[error("Job {0} cannot be retried as it is in status: {1:?}")] + JobRetryFailure(JobId, DaJobStatus), +} diff --git a/crates/bitcoin-da/src/job/metrics.rs b/crates/bitcoin-da/src/job/metrics.rs new file mode 100644 index 0000000000..0b99e7e65c --- /dev/null +++ b/crates/bitcoin-da/src/job/metrics.rs @@ -0,0 +1,116 @@ +use std::sync::LazyLock; + +use metrics::{Counter, Gauge, Histogram}; +use metrics_derive::Metrics; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobProgress}; + +use crate::helpers::get_timestamp; + +/// Defines the metrics being collected for the DA job service +#[derive(Metrics)] +#[metrics(scope = "da_job")] +pub struct DaJobMetrics { + /// Number of pending jobs + #[metric(describe = "Number of jobs in pending status")] + pub jobs_pending: Gauge, + + /// Number of in-progress jobs + #[metric(describe = "Number of jobs in progress status")] + pub jobs_in_progress: Gauge, + + /// Number of completed jobs + #[metric(describe = "Number of jobs in completed status")] + pub jobs_completed: Gauge, + + /// Number of cancelled jobs + #[metric(describe = "Number of jobs in cancelled status")] + pub jobs_cancelled: Gauge, + + /// Number of failed jobs + #[metric(describe = "Number of jobs in failed status")] + pub jobs_failed: Gauge, + + /// Total jobs 
submitted + #[metric(describe = "Total number of jobs submitted")] + pub jobs_submitted_total: Counter, + + /// Total jobs completed successfully + #[metric(describe = "Total number of jobs completed successfully")] + pub jobs_completed_total: Counter, + + /// Total jobs cancelled + #[metric(describe = "Total number of jobs cancelled")] + pub jobs_cancelled_total: Counter, + + /// Total jobs failed + #[metric(describe = "Total number of jobs failed")] + pub jobs_failed_total: Counter, + + /// Time taken to process a job from pending to completion + #[metric(describe = "Duration from job submission to completion in seconds")] + pub job_processing_duration: Histogram, + + /// Number of chunks sent per job + #[metric(describe = "Number of commit/reveal pairs sent per job")] + pub job_chunks_sent: Histogram, +} + +impl DaJobMetrics { + pub fn record_status_update(&self, old_status: &DaJobStatus, progress: &JobProgress) { + let new_status = &progress.status; + if old_status == new_status { + return; + } + + match old_status { + DaJobStatus::Pending => self.jobs_pending.decrement(1.0), + DaJobStatus::InProgress => self.jobs_in_progress.decrement(1.0), + DaJobStatus::Completed => self.jobs_completed.decrement(1.0), + DaJobStatus::Cancelled => self.jobs_cancelled.decrement(1.0), + DaJobStatus::Failed { .. 
} => self.jobs_failed.decrement(1.0), + } + + match new_status { + DaJobStatus::Pending => { + self.jobs_pending.increment(1.0); + } + DaJobStatus::InProgress => { + self.jobs_in_progress.increment(1.0); + } + DaJobStatus::Completed => { + self.jobs_completed.increment(1.0); + self.jobs_completed_total.increment(1); + + // Total time between job creation and completion + if let Some(created_at) = progress.job_id.get_timestamp() { + let duration = get_timestamp().saturating_sub(created_at.to_unix().0); + self.job_processing_duration.record(duration as f64); + } + + // Record total chunks sent + self.job_chunks_sent + .record(progress.sent_txs.count() as f64); + } + DaJobStatus::Cancelled => { + self.jobs_cancelled.increment(1.0); + self.jobs_cancelled_total.increment(1); + } + DaJobStatus::Failed { .. } => { + self.jobs_failed.increment(1.0); + self.jobs_failed_total.increment(1); + } + } + } + + /// Record a job submission + pub fn record_job_submitted(&self) { + self.jobs_submitted_total.increment(1); + self.jobs_pending.increment(1.0); + } +} + +/// DA job service metrics +pub static DA_JOB_METRICS: LazyLock = LazyLock::new(|| { + DaJobMetrics::describe(); + DaJobMetrics::default() +}); diff --git a/crates/bitcoin-da/src/job/mod.rs b/crates/bitcoin-da/src/job/mod.rs new file mode 100644 index 0000000000..bd35726e9c --- /dev/null +++ b/crates/bitcoin-da/src/job/mod.rs @@ -0,0 +1,24 @@ +//! Job management for Bitcoin DA transactions. +//! +//! This module provides a persistent job queue system. +//! Jobs are stored in the database by uuidv7 and processed chronologically. +//! 
Supports partial sending of chunked transactions and recovery + +use crate::job::error::JobServiceError; + +/// Job related error types +pub mod error; + +/// Job related RPC endpoints +pub mod rpc; + +/// Core job queue implementation and state management +pub mod service; + +/// Job related utility methods +pub mod utils; + +/// Job related metrics +mod metrics; + +type Result<T> = std::result::Result<T, JobServiceError>; diff --git a/crates/bitcoin-da/src/job/rpc.rs b/crates/bitcoin-da/src/job/rpc.rs new file mode 100644 index 0000000000..65c12b0c4d --- /dev/null +++ b/crates/bitcoin-da/src/job/rpc.rs @@ -0,0 +1,321 @@ +//! Provides the RPC interface for the bitcoin-da job service. +//! The namespace for these RPC methods is `daJob` +//! This module defines methods to interact with bitcoin-da jobs, +//! including cancelling, retrying and listing jobs. + +use std::sync::Arc; + +use citrea_common::rpc::utils::internal_rpc_error; +use jsonrpsee::core::RpcResult; +use jsonrpsee::proc_macros::rpc; +use serde::{Deserialize, Serialize}; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress}; + +use super::Result; +use crate::service::BitcoinService; + +/// RPC provider trait for da job service +pub(super) trait DaJobRpcProvider { + /// Cancel a pending or in-progress job by job id + /// + /// # Arguments + /// * `job_id` - The job uuid + /// + /// # Returns + /// * `Ok(())` if the job was successfully cancelled + /// * `Err` if the job doesn't exist, is already completed, or cannot be cancelled + fn cancel_job(&self, job_id: JobId) -> Result<()>; + + /// Retry a failed or cancelled job by creating a new job with the same data + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job to retry + /// + /// # Returns + /// * `Ok(JobId)` - The uuid of the newly created retry job + /// * `Err` if the job doesn't exist or is not in a retryable state + fn retry_job(&self, job_id: JobId) -> Result<JobId>; + + /// List jobs with optional filtering and pagination + /// + /// # 
Arguments + /// * `filter` - Optional filter criteria for jobs + /// + /// # Returns + /// * `Ok(Vec)` - List of jobs matching the filter criteria + /// * `Err` on database or serialization errors + fn list_jobs(&self, filter: JobListFilter) -> Result>; + + /// Get detailed information about a specific job + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job + /// + /// # Returns + /// * `Ok(JobInfoResponse)` - Detailed information about the job + /// * `Err` on database error + fn get_job_info(&self, job_id: JobId) -> Result; +} + +/// Filter criteria for listing jobs +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct JobListFilter { + /// Optional status filter (e.g., only show "Pending" jobs) + pub status: Option, + /// Maximum number of jobs to return (default: 25, max: 1000) + pub limit: Option, + /// Skip first N jobs (for pagination) + pub offset: Option, +} + +impl Default for JobListFilter { + fn default() -> Self { + Self { + status: None, + limit: Some(25), + offset: None, + } + } +} + +/// Job status filter for RPC queries +#[derive(Default, Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum JobStatusFilter { + /// Only pending jobs + Pending, + /// Only in-progress jobs + InProgress, + /// Only completed jobs + Completed, + /// Only cancelled jobs + Cancelled, + /// Only failed jobs + Failed, + /// All active jobs (`Pending` + `InProgress`) + Active, + /// All terminal jobs (Completed + Cancelled + Failed) + Terminal, + /// All jobs + #[default] + All, +} + +impl JobStatusFilter { + /// Convert filter to list of status codes to query + pub(super) fn to_job_status(&self) -> Vec { + match self { + JobStatusFilter::Pending => vec![DaJobStatus::Pending], + JobStatusFilter::InProgress => vec![DaJobStatus::InProgress], + JobStatusFilter::Completed => vec![DaJobStatus::Completed], + JobStatusFilter::Cancelled => vec![DaJobStatus::Cancelled], + JobStatusFilter::Failed => { + 
vec![DaJobStatus::Failed { + error: Default::default(), + }] + } + JobStatusFilter::Active => { + vec![DaJobStatus::Pending, DaJobStatus::InProgress] + } + JobStatusFilter::Terminal => vec![ + DaJobStatus::Completed, + DaJobStatus::Cancelled, + DaJobStatus::Failed { + error: Default::default(), + }, + ], + JobStatusFilter::All => vec![ + DaJobStatus::Pending, + DaJobStatus::InProgress, + DaJobStatus::Completed, + DaJobStatus::Cancelled, + DaJobStatus::Failed { + error: Default::default(), + }, + ], + } + } +} + +/// Detailed information about a job for RPC responses +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JobInfoResponse { + /// Unique job identifier + pub job_id: JobId, + /// Current job status + pub status: DaJobStatus, + /// Job creation timestamp + pub created_at: u64, + /// Last update timestamp + pub last_updated: u64, + /// Number of transactions already sent + pub sent_count: usize, + /// Error message if job failed + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +impl From for JobInfoResponse { + fn from(value: JobProgress) -> Self { + let created_at = value.job_id.get_timestamp().map_or(0, |ts| ts.to_unix().0); + Self { + job_id: value.job_id, + status: value.status, + created_at, + last_updated: value.last_updated, + sent_count: value.sent_txs.count(), + error: value.last_error, + } + } +} + +/// Response for job cancellation +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CancelJobResponse { + /// Whether the job was successfully cancelled + pub success: bool, +} + +/// Response for job retry +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RetryJobResponse { + /// Uuid of the newly created retry job + pub new_job_id: JobId, + /// Uuid of the original job that was retried + pub original_job_id: JobId, +} + +#[rpc(client, server, namespace = "daJob")] +pub trait DaJobRpc { 
+ /// Cancels a pending or in-progress job. + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job to cancel + /// + /// # Returns + /// * Success response + /// + /// # Errors + /// * Job not found + /// * Job cannot be cancelled (already completed, failed, or cancelled) + #[method(name = "cancelById")] + async fn da_job_cancel(&self, job_id: JobId) -> RpcResult; + + /// Retries a failed or cancelled job by creating a new job with the same data. + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job to retry + /// + /// # Returns + /// * Response containing the new job ID + /// + /// # Errors + /// * Job not found + /// * Job is not in a retryable state (pending, in-progress, or completed) + #[method(name = "retryById")] + async fn da_job_retry(&self, job_id: JobId) -> RpcResult; + + /// Lists jobs with optional filtering and pagination. + /// + /// # Arguments + /// * `status` - Optional status filter (pending, inProgress, completed, cancelled, failed, active, terminal) + /// * `limit` - Maximum number of jobs to return (default: 100, max: 1000) + /// * `offset` - Number of jobs to skip for pagination (default: 0) + /// + /// # Returns + /// * List of job information matching the filter criteria + #[method(name = "list")] + async fn da_job_list( + &self, + status: Option, + limit: Option, + offset: Option, + ) -> RpcResult>; + + /// Gets detailed information about a specific job. + /// + /// # Arguments + /// * `job_id` - The unique identifier of the job + /// + /// # Returns + /// * Detailed job information including status, timestamps, and progress + /// + /// # Errors + /// * Database error related errors + #[method(name = "getById")] + async fn da_job_get_info(&self, job_id: JobId) -> RpcResult; +} + +/// The implementation of the RPC itself. 
+pub struct DaJobRpcServerImpl { + da: Arc, +} + +#[async_trait::async_trait] +impl DaJobRpcServer for DaJobRpcServerImpl { + async fn da_job_cancel(&self, job_id: JobId) -> RpcResult { + self.da + .job_service + .lock() + .await + .cancel_job(job_id) + .map(|_| CancelJobResponse { success: true }) + .map_err(internal_rpc_error) + } + + async fn da_job_retry(&self, job_id: JobId) -> RpcResult { + self.da + .job_service + .lock() + .await + .retry_job(job_id) + .map(|new_job_id| RetryJobResponse { + new_job_id, + original_job_id: job_id, + }) + .map_err(internal_rpc_error) + } + + async fn da_job_list( + &self, + status: Option, + limit: Option, + offset: Option, + ) -> RpcResult> { + let filter = JobListFilter { + status, + limit, + offset, + }; + + Ok(self + .da + .job_service + .lock() + .await + .list_jobs(filter) + .map_err(internal_rpc_error)? + .into_iter() + .map(Into::into) + .collect()) + } + + async fn da_job_get_info(&self, job_id: JobId) -> RpcResult { + self.da + .job_service + .lock() + .await + .get_job_info(job_id) + .map_err(internal_rpc_error) + .map(Into::into) + } +} + +/// Creates a new module for the bitcoin-da job service RPCs. 
+pub fn create_rpc_module(da: Arc) -> jsonrpsee::RpcModule { + let server = DaJobRpcServerImpl { da }; + DaJobRpcServer::into_rpc(server) +} diff --git a/crates/bitcoin-da/src/job/service.rs b/crates/bitcoin-da/src/job/service.rs new file mode 100644 index 0000000000..8369ea2052 --- /dev/null +++ b/crates/bitcoin-da/src/job/service.rs @@ -0,0 +1,359 @@ +use std::collections::{HashMap, HashSet}; +use std::num::NonZeroUsize; +use std::sync::Arc; + +use anyhow::Context; +use bitcoin::hashes::Hash; +use bitcoin::Txid; +use lru::LruCache; +use parking_lot::Mutex; +use sov_db::ledger_db::DaLedgerOps; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress}; +use sov_rollup_interface::da::DataOnDa; +use sov_rollup_interface::services::da::DaTxRequest; +use tokio::sync::oneshot; +use tracing::{info, instrument}; +use uuid::Uuid; + +use super::Result; +use crate::error::BitcoinServiceError; +use crate::helpers::builders::body_builders::RawTxData; +use crate::helpers::get_timestamp; +use crate::job::error::JobServiceError; +use crate::job::metrics::DA_JOB_METRICS as METRICS; +use crate::job::rpc::{DaJobRpcProvider, JobListFilter}; +use crate::service::{split_proof, TxidWrapper}; + +type JobWaiters = + HashMap>>; + +/// Job service +pub struct DaJobService { + ledger_db: DB, + job_waiters: Arc>, + raw_tx_data_cache: Arc>>, +} + +impl DaJobService { + /// Creates a new `DaJobService` with `ledger_db` + pub fn new(ledger_db: DB, cache_size: Option) -> Self { + let cache_size = cache_size.unwrap_or_else(|| NonZeroUsize::new(10).unwrap()); + + Self { + ledger_db, + job_waiters: Arc::new(Mutex::new(HashMap::new())), + raw_tx_data_cache: Arc::new(Mutex::new(LruCache::new(cache_size))), + } + } + + /// Create a new job and save to db + pub fn submit_job( + &self, + da_tx_request: DaTxRequest, + tx: oneshot::Sender>, + ) -> Result { + let job_id = Uuid::now_v7(); + + let progress = JobProgress::new(job_id, get_timestamp()); + + self.ledger_db + .submit_job(job_id, 
&da_tx_request, &progress)?; + + METRICS.record_job_submitted(); + + self.job_waiters.lock().insert(job_id, tx); + + info!("Job {job_id} submitted and persisted"); + Ok(job_id) + } + + /// Get a job data by id + #[instrument(level = "trace", skip(self), ret)] + pub(crate) fn get_job_request(&self, job_id: &JobId) -> Result> { + self.ledger_db + .get_job_request(job_id) + .map_err(JobServiceError::DatabaseError) + } + + /// Retrieve job progress by id and convert to local format + #[instrument(level = "trace", skip(self), ret)] + pub(crate) fn get_progress(&self, job_id: &JobId) -> Result> { + self.ledger_db + .get_progress(job_id) + .map_err(JobServiceError::DatabaseError) + } + + /// Get the raw transaction data for a job + /// + /// This function attempts to retrieve the data from cache first. + /// If not found in cache, it deserializes from the job data and + /// transforms it into the appropriate RawTxData format. + /// + /// For StoredProof requests, it retrieves the actual proof from the database + /// using the proof_id reference. 
+ /// + /// # Arguments + /// + /// * `job` - The job containing serialized DaTxRequest data + /// + /// # Returns + /// + /// * `Result` - The raw transaction data or an error + #[instrument(level = "trace", skip(self), ret)] + pub(crate) fn get_job_data(&self, job_id: Uuid, job_data: DaTxRequest) -> Result { + if let Some(data) = self.raw_tx_data_cache.lock().get(&job_id) { + return Ok(data.to_owned()); + } + + let raw_tx_data = match job_data { + DaTxRequest::ZKProof(zkproof) => split_proof(zkproof), + DaTxRequest::StoredProof(proof_id) => { + // Retrieve proof via secondary index + let zkproof = self.ledger_db.get_proof_by_proof_id(proof_id)?; + split_proof(zkproof) + } + DaTxRequest::SequencerCommitment(comm) => { + let blob = borsh::to_vec(&DataOnDa::SequencerCommitment(comm)) + .expect("SequencerCommitment serialize must not fail"); + Ok(RawTxData::SequencerCommitment(blob)) + } + DaTxRequest::BatchProofMethodId(id) => { + let blob = borsh::to_vec(&DataOnDa::BatchProofMethodId(id)) + .expect("BatchProofMethodId serialize must not fail"); + Ok(RawTxData::BatchProofMethodId(blob)) + } + } + .context("Failed to retrieve RawTxData from DaTxRequest")?; + + self.raw_tx_data_cache + .lock() + .push(job_id, raw_tx_data.clone()); + + Ok(raw_tx_data) + } + + /// Get all `Pending` and `InProgress` job ids from storage + #[instrument(level = "trace", skip(self), ret)] + pub(crate) fn get_all_active_job_ids(&self) -> Result> { + let mut active_jobs = Vec::new(); + + active_jobs.extend( + self.ledger_db + .get_job_ids_by_status(DaJobStatus::Pending.as_u8())?, + ); + + active_jobs.extend( + self.ledger_db + .get_job_ids_by_status(DaJobStatus::InProgress.as_u8())?, + ); + + // Sort uuidv7 chronologically + active_jobs.sort(); + + Ok(active_jobs) + } + + /// Save job progress + #[instrument(level = "debug", skip(self))] + pub fn upsert_job_progress(&self, progress: &mut JobProgress) -> Result<()> { + progress.last_updated = get_timestamp(); + + 
self.ledger_db.upsert_progress(progress)?; + + Ok(()) + } + + /// Update and save job progress to a new status + #[instrument(level = "debug", skip(self))] + pub fn update_job_status( + &self, + progress: &mut JobProgress, + new_status: DaJobStatus, + ) -> Result<()> { + let job_id = progress.job_id; + let previous_status = progress.status.clone(); + + progress.status = new_status; + progress.last_updated = get_timestamp(); + + let db_progress = progress.clone(); + self.ledger_db + .upsert_progress_new_status(&db_progress, previous_status.as_u8())?; + + METRICS.record_status_update(&previous_status, progress); + + self.notify_new_status(job_id, progress); + + Ok(()) + } + + /// Get all pending commit and reveals txids. + /// + /// This is required for removing from the utxo set and prevent selecting UTXOs twice + #[instrument(level = "trace", skip_all, ret)] + pub(crate) fn get_pending_chunks(&self) -> Result> { + let mut txids = HashSet::new(); + + let active_job_ids = self.get_all_active_job_ids()?; + for job_id in active_job_ids { + if let Some(JobProgress { + status: DaJobStatus::InProgress, + sent_txs, + .. + }) = self.get_progress(&job_id)? 
+ { + txids.extend(sent_txs.commit.into_iter().map(Txid::from_byte_array)); + txids.extend(sent_txs.reveal.into_iter().map(Txid::from_byte_array)); + } + } + + Ok(txids) + } + + fn notify_new_status(&self, job_id: JobId, progress: &JobProgress) { + let result = match &progress.status { + DaJobStatus::Completed => { + if let Some(last_tx) = progress.sent_txs.reveal.last() { + Ok(TxidWrapper(Txid::from_byte_array(*last_tx))) + } else { + Err(JobServiceError::NoTransactionsFound(job_id).into()) + } + } + DaJobStatus::Cancelled => Err(JobServiceError::JobCancelled(job_id).into()), + DaJobStatus::Failed { error } => { + Err(JobServiceError::JobFailed(job_id, error.clone()).into()) + } + DaJobStatus::Pending | DaJobStatus::InProgress => return, + }; + + if let Some(tx) = self.job_waiters.lock().remove(&job_id) { + let _ = tx.send(result); + } + } + + pub(crate) fn insert_waiter( + &self, + job_id: JobId, + waiter: oneshot::Sender>, + ) { + self.job_waiters.lock().insert(job_id, waiter); + } + + pub(crate) fn recover_job_waiter( + &self, + job_id: Uuid, + ) -> Result>> { + let progress = self + .get_progress(&job_id)? 
+ .ok_or(JobServiceError::JobNotFound(job_id))?; + + let (tx, rx) = oneshot::channel(); + + match progress.status { + DaJobStatus::Completed => { + // Job already finished before we subscribed + if let Some(last_tx) = progress.sent_txs.reveal.last() { + let _ = tx.send(Ok(TxidWrapper(Txid::from_byte_array(*last_tx)))); + } else { + let _ = tx.send(Err(JobServiceError::NoTransactionsFound(job_id).into())); + } + } + DaJobStatus::Failed { error } => { + // Job already failed + let _ = tx.send(Err(JobServiceError::JobFailed(job_id, error).into())); + } + DaJobStatus::Cancelled => { + // Job already cancelled + let _ = tx.send(Err(JobServiceError::JobCancelled(job_id).into())); + } + DaJobStatus::Pending | DaJobStatus::InProgress => { + // Job still running, register for notification + self.insert_waiter(job_id, tx); + } + } + + Ok(rx) + } +} + +/// Implementation of RPC provider methods +impl DaJobRpcProvider for DaJobService { + fn cancel_job(&self, job_id: JobId) -> Result<()> { + // Get job progress to check status + let mut progress = self + .get_progress(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + + // Only allow cancellation of pending or in-progress jobs + match progress.status { + DaJobStatus::Pending | DaJobStatus::InProgress => { + self.update_job_status(&mut progress, DaJobStatus::Cancelled)?; + tracing::info!("Job {job_id} successfully cancelled"); + Ok(()) + } + DaJobStatus::Completed | DaJobStatus::Cancelled | DaJobStatus::Failed { .. } => Err( + JobServiceError::JobCancellationFailure(job_id, progress.status), + ), + } + } + + fn retry_job(&self, job_id: JobId) -> Result { + // Get job progress to check status + let progress = self + .get_progress(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + + // Only allow retry of failed or cancelled jobs + match progress.status { + DaJobStatus::Failed { .. } | DaJobStatus::Cancelled => { + // Get original job and deserialize data + let da_tx_request = self + .get_job_request(&job_id)? 
+ .ok_or(JobServiceError::JobNotFound(job_id))?; + + let (tx, _rx) = oneshot::channel(); + // Create new job with same data + let new_job_id = self.submit_job(da_tx_request, tx)?; + tracing::info!("Job {job_id} retried as new job {new_job_id}"); + + Ok(new_job_id) + } + DaJobStatus::Pending | DaJobStatus::InProgress | DaJobStatus::Completed => { + Err(JobServiceError::JobRetryFailure(job_id, progress.status)) + } + } + } + + fn list_jobs(&self, filter: JobListFilter) -> Result> { + let limit = filter.limit.unwrap_or(25).min(100); // Defaults to 25, capped at 100 + let offset = filter.offset.unwrap_or(0); + + // Get job ids based on status filter + let status_filter = filter.status.unwrap_or_default(); + + let mut job_ids = Vec::new(); + for code in status_filter.to_job_status() { + job_ids.extend(self.ledger_db.get_job_ids_by_status(code.as_u8())?); + } + job_ids.sort(); // sort chronologically by uuidv7 + + // Apply pagination + // TODO paginate at the db level. This should be sufficient for now as we take/skip on uuid before fetching job info + let job_ids: Vec<_> = job_ids.into_iter().skip(offset).take(limit).collect(); + + // Return (job, progress) per id + let mut job_infos = Vec::new(); + for job_id in job_ids { + if let Some(progress) = self.get_progress(&job_id)? { + job_infos.push(progress); + } + } + + Ok(job_infos) + } + + fn get_job_info(&self, job_id: JobId) -> Result { + self.get_progress(&job_id)? 
+ .ok_or(JobServiceError::JobNotFound(job_id)) + } +} diff --git a/crates/bitcoin-da/src/job/utils.rs b/crates/bitcoin-da/src/job/utils.rs new file mode 100644 index 0000000000..fcadf749d7 --- /dev/null +++ b/crates/bitcoin-da/src/job/utils.rs @@ -0,0 +1,10 @@ +use sov_db::schema::types::da_jobs::JobId; + +use crate::helpers::get_timestamp; + +/// Calculates elapsed time since job creation using job uuidv7 +pub fn get_job_elapsed_time(job_id: JobId) -> u64 { + let job_created_at = job_id.get_timestamp().map(|ts| ts.to_unix().0).unwrap_or(0); + + get_timestamp().saturating_sub(job_created_at) +} diff --git a/crates/bitcoin-da/src/lib.rs b/crates/bitcoin-da/src/lib.rs index d317c28368..1ccb48f064 100644 --- a/crates/bitcoin-da/src/lib.rs +++ b/crates/bitcoin-da/src/lib.rs @@ -69,6 +69,9 @@ pub mod fee; #[cfg(feature = "native")] pub mod rpc; +#[cfg(feature = "native")] +pub mod job; + #[cfg(feature = "native")] pub mod utxo_manager; diff --git a/crates/bitcoin-da/src/monitoring.rs b/crates/bitcoin-da/src/monitoring.rs index 701bb584e1..8d659777cb 100644 --- a/crates/bitcoin-da/src/monitoring.rs +++ b/crates/bitcoin-da/src/monitoring.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::Duration; use anyhow::anyhow; use bitcoin::address::NetworkUnchecked; @@ -24,6 +24,7 @@ use tokio::time::interval; use tracing::{debug, error, info, instrument, trace}; use crate::helpers::builders::TxWithId; +use crate::helpers::get_timestamp; use crate::helpers::parsers::parse_relevant_transaction; use crate::spec::utxo::UTXO; @@ -32,20 +33,10 @@ type Result = std::result::Result; const REBROADCAST_EACH_N_BLOCK: u64 = 1; -/// Return UNIX timestamp in seconds -fn get_timestamp() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Cannot fail because there is always a UNIX epoch") - .as_secs() -} - /// Transaction status in the monitoring 
service. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum TxStatus { - /// Queued tx, not already broadcasted - Queued, /// Tx in mempool #[serde(rename_all = "camelCase")] InMempool { @@ -126,7 +117,7 @@ impl MonitoredTx { /// Return the UTXOs for this transaction if it's not replaced or evicted. pub fn to_utxos(&self) -> Option> { let confirmations = match self.status { - TxStatus::Queued | TxStatus::InMempool { .. } => 0, + TxStatus::InMempool { .. } => 0, TxStatus::Confirmed { confirmations, .. } | TxStatus::Finalized { confirmations, .. } => confirmations, _ => return None, @@ -174,8 +165,8 @@ impl Default for ChainState { #[derive(Error, Debug)] pub enum MonitorError { /// Already monitored. - #[error("Transaction already monitored")] - AlreadyMonitored, + #[error("Transaction {0} already monitored")] + AlreadyMonitored(Txid), /// Transaction not found. #[error("Transaction not found")] TxNotFound, @@ -484,7 +475,7 @@ impl MonitoringService { let mut monitored_txs = self.monitored_txs.write().await; if monitored_txs.contains_key(&txid) { - return Err(MonitorError::AlreadyMonitored); + return Err(MonitorError::AlreadyMonitored(txid)); } if let Some(prev_tx_id) = prev_txid { @@ -496,10 +487,12 @@ impl MonitoringService { let current_height = self.client.get_block_count().await?; + let tx_result = self.client.get_transaction(&txid, None).await?; + self.total_size .fetch_add(tx.tx.total_size(), Ordering::SeqCst); - let status = TxStatus::Queued; + let status = self.determine_tx_status(&tx_result, None).await?; let monitored_tx = MonitoredTx { tx: tx.tx, txid, @@ -537,7 +530,7 @@ impl MonitoringService { self.total_size.fetch_add(tx.total_size(), Ordering::SeqCst); let status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; let new_tx = MonitoredTx { @@ -628,7 +621,9 @@ impl MonitoringService { if let TxStatus::Confirmed { 
confirmations, .. } = tx.status { if confirmations <= depth { let tx_result = self.client.get_transaction(txid, None).await?; - tx.status = self.determine_tx_status(&tx_result, &tx.status).await?; + tx.status = self + .determine_tx_status(&tx_result, Some(&tx.status)) + .await?; if let TxStatus::InMempool { .. } = tx.status { info!("Rebroadcasting tx {} {tx:?}", tx.tx.compute_txid()); @@ -648,10 +643,10 @@ impl MonitoringService { for (txid, monitored_tx) in txs.iter_mut() { match &monitored_tx.status { // Check non-finalized TXs - TxStatus::Queued | TxStatus::Confirmed { .. } | TxStatus::Replaced { .. } => { + TxStatus::Confirmed { .. } | TxStatus::Replaced { .. } => { if let Ok(tx_result) = self.client.get_transaction(txid, None).await { let new_status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; monitored_tx.status = new_status; @@ -664,14 +659,14 @@ impl MonitoringService { } if *rebroadcast_attempts > 0 => { let tx_result = self.client.get_transaction(txid, None).await?; let new_status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; monitored_tx.status = new_status; } TxStatus::InMempool { height, .. 
} => { let tx_result = self.client.get_transaction(txid, None).await?; let new_status = self - .determine_tx_status(&tx_result, &monitored_tx.status) + .determine_tx_status(&tx_result, Some(&monitored_tx.status)) .await?; // If status is still InMempool, check for how many block it has been in mempool and rebroadcast every REBROADCAST_EACH_N_BLOCK @@ -696,7 +691,7 @@ impl MonitoringService { async fn determine_tx_status( &self, tx_result: &GetTransactionResult, - current_status: &TxStatus, + current_status: Option<&TxStatus>, ) -> Result { let confirmations = tx_result.info.confirmations as u64; let status = if confirmations > 0 { @@ -737,7 +732,7 @@ impl MonitoringService { // Tx not found in mempool Err(_) => match current_status { // If transaction is queued or evicted, keep status as is - TxStatus::Queued | TxStatus::Evicted { .. } => current_status.clone(), + Some(status @ TxStatus::Evicted { .. }) => status.clone(), // If transaction was previously in mempool or confirmed, re-org happened and it got evicted from mempool _ => { tracing::info!("Tx {} was evicted from mempool.", tx_result.info.txid); @@ -906,7 +901,9 @@ impl MonitoringService { for txid in txids { if let Some(entry) = monitored_txs.get_mut(txid) { if let Ok(tx_result) = self.client.get_transaction(txid, None).await { - entry.status = self.determine_tx_status(&tx_result, &entry.status).await?; + entry.status = self + .determine_tx_status(&tx_result, Some(&entry.status)) + .await?; entry.last_checked = get_timestamp(); entry.address = tx_result .details diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index a1153dc635..eab49d1b4a 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -6,11 +6,10 @@ use core::result::Result::Ok; use core::str::FromStr; use core::time::Duration; -use std::collections::{HashMap, VecDeque}; +use std::collections::{HashMap, HashSet}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; 
-use std::time::Instant; use anyhow::anyhow; use async_trait::async_trait; @@ -29,24 +28,29 @@ use citrea_primitives::{MAX_COMPRESSED_BLOB_SIZE, MAX_TX_BODY_SIZE}; use lru::LruCache; use reth_tasks::shutdown::GracefulShutdown; use serde::{Deserialize, Serialize}; -use sov_rollup_interface::da::{DaSpec, DaTxRequest, DataOnDa, SequencerCommitment}; -use sov_rollup_interface::services::da::{DaService, TxRequestWithNotifier}; +use sov_db::ledger_db::LedgerDB; +use sov_db::schema::types::da_jobs::{DaJobStatus, JobId, JobProgress, SentTxs}; +use sov_rollup_interface::da::{DaSpec, DataOnDa, SequencerCommitment}; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::Proof; use sov_rollup_interface::Network; use tokio::select; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; -use tokio::sync::oneshot::channel as oneshot_channel; -use tokio::sync::Mutex; +use tokio::sync::mpsc::UnboundedReceiver; +use tokio::sync::{oneshot, Mutex, Notify}; use tracing::{debug, error, info, instrument, trace, warn}; +use uuid::Uuid; use crate::error::{BitcoinServiceError, MempoolRejection}; -use crate::fee::{validate_txs_fee_rate, BumpFeeMethod, FeeService}; +use crate::fee::{BumpFeeMethod, FeeService}; use crate::helpers::backup::backup_txs_to_file; use crate::helpers::builders::body_builders::{create_inscription_transactions, DaTxs, RawTxData}; use crate::helpers::builders::TxWithId; use crate::helpers::merkle_tree::BitcoinMerkleTree; use crate::helpers::parsers::{parse_relevant_transaction, ParsedTransaction, VerifyParsed}; use crate::helpers::{merkle_tree, TransactionKind}; +use crate::job::error::JobServiceError; +use crate::job::service::DaJobService; +use crate::job::utils::get_job_elapsed_time; use crate::metrics::BITCOIN_DA_METRICS as BM; use crate::monitoring::{MonitoredTxKind, MonitoringConfig, MonitoringService, TxStatus}; use crate::network_constants::NetworkConstants; @@ -65,7 +69,10 @@ use crate::verifier::{ pub(crate) 
type Result = std::result::Result; -const POLLING_INTERVAL: u64 = 10; // seconds +const POLLING_INTERVAL: u64 = 10; // 10 seconds + +const DEFAULT_FEE_RATE_CAP_DURATION_SECS: u64 = 3600; // 1 hour default cap duration +const DEFAULT_MAX_FEE_RATE_SAT_VB: f64 = 15.0; // 15sat/vb default max fee rate /// Map sov Network to Bitcoin Network. pub fn network_to_bitcoin_network(network: &Network) -> bitcoin::Network { @@ -106,6 +113,12 @@ pub struct BitcoinServiceConfig { /// Connection timeout for RPC in seconds pub rpc_connect_timeout_secs: Option, + + /// Max fee rate in sat/vb + pub max_fee_rate_sat_to_pay: Option, + + /// Fee rate cap duration in seconds + pub fee_rate_cap_duration_secs: Option, } impl citrea_common::FromEnv for BitcoinServiceConfig { @@ -131,26 +144,34 @@ impl citrea_common::FromEnv for BitcoinServiceConfig { rpc_connect_timeout_secs: read_env("BITCOIN_RPC_CONNECT_TIMEOUT_SECS") .ok() .and_then(|v| v.parse::().ok()), + max_fee_rate_sat_to_pay: read_env("BITCOIN_MAX_FEE_RATE_SAT_TO_PAY") + .ok() + .and_then(|v| v.parse::().ok()), + fee_rate_cap_duration_secs: read_env("BITCOIN_FEE_RATE_CAP_DURATION_SECS") + .ok() + .and_then(|v| v.parse::().ok()), }) } } /// A service that provides data and data availability proofs for Bitcoin -#[derive(Debug)] pub struct BitcoinService { client: Arc, pub(crate) network: bitcoin::Network, network_constants: NetworkConstants, pub(crate) da_private_key: Option, pub(crate) reveal_tx_prefix: Vec, - inscribes_queue: UnboundedSender>, pub(crate) tx_backup_dir: PathBuf, /// Monitoring service for tracking transaction status. 
pub monitoring: Arc, fee: FeeService, l1_block_hash_to_height: Arc>>, - tx_queue: Arc>>, pub(crate) tx_signer: TxSigner, + // Persistent job queue + pub(crate) job_service: Mutex>, + max_fee_rate_sat_to_pay: f64, + fee_rate_cap_duration_secs: u64, + job_notifier: Arc, pub(crate) utxo_manager: UtxoManager, } @@ -162,11 +183,12 @@ impl BitcoinService { network_constants: NetworkConstants, monitoring: Arc, fee: FeeService, - inscribes_queue: UnboundedSender>, da_private_key: Option, reveal_tx_prefix: Vec, tx_backup_dir: PathBuf, - tx_queue: Arc>>, + job_service: Mutex>, + max_fee_rate_sat_to_pay: f64, + fee_rate_cap_duration_secs: u64, utxo_manager: UtxoManager, ) -> Self { Self { @@ -176,14 +198,16 @@ impl BitcoinService { network, da_private_key, reveal_tx_prefix, - inscribes_queue, tx_backup_dir, monitoring, fee, l1_block_hash_to_height: Arc::new(Mutex::new(LruCache::new( NonZeroUsize::new(100).unwrap(), ))), - tx_queue, + job_service, + max_fee_rate_sat_to_pay, + fee_rate_cap_duration_secs, + job_notifier: Arc::new(Notify::new()), utxo_manager, } } @@ -199,7 +223,7 @@ impl BitcoinService { monitoring: Arc, fee_service: FeeService, require_wallet_check: bool, - inscribes_queue: UnboundedSender>, + ledger_db: LedgerDB, ) -> Result { if require_wallet_check && client @@ -224,11 +248,17 @@ impl BitcoinService { .transpose() .map_err(|_| BitcoinServiceError::InvalidPrivateKey)?; - let tx_queue = Arc::new(Mutex::new(VecDeque::new())); + let job_service = Mutex::new(DaJobService::new(ledger_db, None)); + let max_fee_rate_sat_to_pay = config + .max_fee_rate_sat_to_pay + .unwrap_or(DEFAULT_MAX_FEE_RATE_SAT_VB); + let fee_rate_cap_duration_secs = config + .fee_rate_cap_duration_secs + .unwrap_or(DEFAULT_FEE_RATE_CAP_DURATION_SECS); + let utxo_manager = UtxoManager::new( client.clone(), monitoring.clone(), - tx_queue.clone(), network_constants, config.utxo_selection_mode.clone().unwrap_or_default(), ); @@ -239,11 +269,12 @@ impl BitcoinService { network_constants, 
monitoring, fee_service, - inscribes_queue, da_private_key, chain_params.reveal_tx_prefix, tx_backup_dir.to_path_buf(), - tx_queue, + job_service, + max_fee_rate_sat_to_pay, + fee_rate_cap_duration_secs, utxo_manager, )) } @@ -252,13 +283,10 @@ impl BitcoinService { #[instrument(name = "BitcoinDA", skip_all)] pub async fn run_da_queue( self: Arc, - mut rx: UnboundedReceiver>, mut new_block_rx: UnboundedReceiver, mut shutdown: GracefulShutdown, ) { trace!("BitcoinDA queue is initialized. Waiting for the first request..."); - let mut fee_rate_multiplier = self.fee.base_fee_rate_multiplier(); - loop { select! { biased; @@ -269,112 +297,187 @@ impl BitcoinService { new_height_opt = new_block_rx.recv() => { if let Some(new_height) = new_height_opt { trace!("New da block height {new_height}. Processing transaction queue."); - if let Err(e) = self.process_transaction_queue().await { + + if let Err(e) = self.process_job_service().await { error!(?e, "Error processing queue on new block"); } } } - request_opt = rx.recv() => { - if let Some(request) = request_opt { - trace!("A new request is received"); - - loop { - // Build and queue tx with retries: - let fee_sat_per_vbyte = match self.fee.get_fee_rate().await { - Ok(rate) => rate * fee_rate_multiplier, - Err(e) => { - error!(?e, "Failed to call get_fee_rate. 
Retrying..."); - tokio::time::sleep(Duration::from_secs(1)).await; - continue; - } - }; - match self - .send_transaction_with_fee_rate( - request.tx_request.clone(), - fee_sat_per_vbyte, - ) - .await - { - Ok(txs) => { - let txid = txs.last().unwrap()[1].id; - let tx_id = TxidWrapper(txid); - info!(%txid, "Sent tx to BitcoinDA"); - let _ = request.notify.send(Ok(tx_id)); - - fee_rate_multiplier = self.fee.base_fee_rate_multiplier(); - } - Err(e) => { - error!(?e, "Failed to send transaction to DA layer"); - tokio::time::sleep(Duration::from_secs(1)).await; - - match e { - BitcoinServiceError::MempoolRejection(MempoolRejection::MinRelayFeeNotMet) | BitcoinServiceError::FeeCalculation(_) => { - fee_rate_multiplier = self.fee.get_next_fee_rate_multiplier(fee_rate_multiplier); - }, - BitcoinServiceError::QueueNotEmpty => { - let _ = self.process_transaction_queue().await; - }, - _ => {} - } - - continue; - } - } - break; - } + + _ = self.job_notifier.notified() => { + trace!("Job submitted, processing queue"); + if let Err(e) = self.process_job_service().await { + error!(?e, "Error processing queue on job trigger"); } } } } } - /// Queue and try sending transaction to DA - pub async fn send_transaction_with_fee_rate( + // Process job queue + async fn process_job_service(&self) -> Result<()> { + let job_service = self.job_service.lock().await; + + // Optimization for utxo selection. + // If previous job ends in progress, we need to select a new utxo in Oldest mode. + // If the subsequent job completes, we can continue chaining from its outputs. + let mut previous_job_in_progress = false; + + // Get all pending/in-progress jobs + let active_job_ids = job_service.get_all_active_job_ids()?; + for job_id in active_job_ids { + info!("Processing job {job_id}"); + + let job_request = job_service + .get_job_request(&job_id)? + .ok_or(JobServiceError::JobNotFound(job_id))?; + let progress = &mut job_service + .get_progress(&job_id)? 
+ .ok_or(JobServiceError::JobNotFound(job_id))?; + + let job_data = job_service.get_job_data(job_id, job_request)?; + + let sent_txids = job_service.get_pending_chunks()?; + + match self + .process_job(job_data, progress, &sent_txids, previous_job_in_progress) + .await + { + Ok(completed) => { + progress.last_error = None; + + if completed { + job_service.update_job_status(progress, DaJobStatus::Completed)?; + info!("Job {job_id} completed successfully"); + + previous_job_in_progress = false; + } else { + job_service.update_job_status(progress, DaJobStatus::InProgress)?; + info!("Job {job_id} partially sent"); + + previous_job_in_progress = true; + } + } + Err(e @ BitcoinServiceError::FeeCapExceeded { .. }) => { + warn!("Job {job_id} hit fee cap: {e:?}"); + + // Save updated progress with last sent attempt value and continue + // Fee cap errors should be retried on next `process_job_service` call + job_service.upsert_job_progress(progress)?; + continue; + } + Err(e) => { + // TODO make the distinction between recoverable and unrecoverable error. 
+ // The latter should be updated to Failed status + error!("Error processing job {job_id}: {e:?}"); + progress.last_error = Some(e.to_string()); + job_service.upsert_job_progress(progress)?; + } + } + } + + Ok(()) + } + + async fn process_job( &self, - tx_request: DaTxRequest, - fee_sat_per_vbyte: f64, - ) -> Result> { - let now = Instant::now(); + job_data: RawTxData, + progress: &mut JobProgress, + sent_txids: &HashSet, + previous_job_in_progress: bool, + ) -> Result { + info!( + "Processing job {} with status {:?}", + progress.job_id, progress.status + ); + + // Get current fee rate as sat/vb + let fee_sat_per_vbyte = self.fee.get_fee_rate().await?; + + // Validate fee rate against cap + self.validate_fee_rate(progress.job_id, fee_sat_per_vbyte)?; - let utxo_context = self.utxo_manager.prepare_context().await?; + // Recover sent commits and sent reveals from their txids + let (sent_commits, sent_reveals) = + self.recover_sent_transactions(&progress.sent_txs).await?; + + let utxo_context = self + .utxo_manager + .prepare_context(&progress.status, previous_job_in_progress, sent_txids) + .await?; let da_txs = self .create_da_transactions_with_fee_rate( - tx_request, fee_sat_per_vbyte, utxo_context.clone(), + job_data, + sent_commits.clone(), + sent_reveals.clone(), ) .await?; - let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?; + + let current_idx = progress.sent_txs.count(); + let signed_txs = self + .tx_signer + .sign_da_txs(da_txs.clone(), current_idx) + .await?; // Test whether signed_txs should be accepted in queue if !self.test_mempool_accept_queue_tx(&signed_txs).await? 
{ // If it failed on mempool policy limit, it can also fail on meeting min relay fee - // Stateless validation of signed txs fee - validate_txs_fee_rate(&signed_txs, fee_sat_per_vbyte, utxo_context)?; + self.fee + .validate_txs_fee_rate( + &signed_txs, + &sent_commits, + &sent_reveals, + fee_sat_per_vbyte, + utxo_context, + ) + .await?; } // backup to file after mempool acceptance backup_txs_to_file(&self.tx_backup_dir, &signed_txs)?; - let txs = signed_txs - .iter() - .map(|tx| tx.clone().into_txs_with_id()) - .collect::>(); - self.monitoring - .monitor_transaction_chain(txs.clone()) - .await?; + let mut txids = Vec::new(); + let mut sent_count = 0; + + for signed_tx in &signed_txs { + // Test mempool acceptance for this specific transaction + if let Err(e) = self.test_mempool_accept(&signed_tx.as_raw_txs()).await { + debug!(?e, "Transaction rejected by mempool, stopping batch"); + break; + } - // Queue transactions - self.queue_transactions(signed_txs).await; + match self.send_signed_transaction(signed_tx).await { + Ok(ids) => { + sent_count += 1; + txids.extend(&ids); + + progress.sent_txs.extend( + vec![signed_tx.commit.tx.compute_txid().to_byte_array()], + vec![signed_tx.reveal.tx.compute_txid().to_byte_array()], + ); + + let txs = signed_tx.clone().into_txs_with_id(); + self.monitoring.monitor_transaction_chain(vec![txs]).await?; + } + Err(e) => { + error!(?e, "Error sending signed transaction"); + break; + } + } + } - // Process transaction queue. 
- self.process_transaction_queue().await?; + if let Err(e) = self.monitoring.update_txs_status(&txids).await { + error!(?e, "Failed to update queued tx status"); + } - BM.transaction_queue_processing_time - .record(Instant::now().saturating_duration_since(now).as_secs_f64()); + let total_needed = da_txs.count(); + let total_sent = current_idx + sent_count; + let completed = total_sent >= total_needed; - Ok(txs) + Ok(completed) } #[instrument(level = "trace", skip_all, ret)] @@ -388,28 +491,48 @@ impl BitcoinService { .collect() } + /// Validates fee rate against `max_fee_rate_sat_to_pay` + #[instrument(level = "trace", skip(self))] + fn validate_fee_rate(&self, job_id: JobId, fee_sat_per_vbyte: f64) -> Result<()> { + if fee_sat_per_vbyte <= self.max_fee_rate_sat_to_pay { + return Ok(()); + } + + let elapsed_secs = get_job_elapsed_time(job_id); + + if elapsed_secs < self.fee_rate_cap_duration_secs { + warn!( + "Job {job_id} fee rate {fee_sat_per_vbyte} sat/vb exceeds cap of {} sat/vb. \ + Waiting (elapsed: {elapsed_secs}s / max: {}s)", + self.max_fee_rate_sat_to_pay, self.fee_rate_cap_duration_secs + ); + + return Err(BitcoinServiceError::FeeCapExceeded { + current_rate: fee_sat_per_vbyte, + max_rate: self.max_fee_rate_sat_to_pay, + elapsed_secs, + max_duration_secs: self.fee_rate_cap_duration_secs, + }); + } + + warn!( + "Job {job_id} fee rate {fee_sat_per_vbyte} sat/vb exceeds cap but cap duration of {}s exceeded. Sending anyway", + self.fee_rate_cap_duration_secs + ); + + Ok(()) + } + /// Sends a transaction to the Bitcoin network with a specified fee rate. 
#[instrument(level = "trace", fields(prev_utxo), ret, err, skip(self))] async fn create_da_transactions_with_fee_rate( &self, - tx_request: DaTxRequest, fee_sat_per_vbyte: f64, utxo_context: UtxoContext, + data: RawTxData, + sent_commits: Vec, + sent_reveals: Vec, ) -> Result { - let data = match tx_request { - DaTxRequest::ZKProof(zkproof) => split_proof(zkproof)?, - DaTxRequest::SequencerCommitment(comm) => { - let data = DataOnDa::SequencerCommitment(comm); - let blob = borsh::to_vec(&data).expect("DataOnDa serialize must not fail"); - RawTxData::SequencerCommitment(blob) - } - DaTxRequest::BatchProofMethodId(method_id) => { - let data = DataOnDa::BatchProofMethodId(method_id); - let blob = borsh::to_vec(&data).expect("DataOnDa serialize must not fail"); - RawTxData::BatchProofMethodId(blob) - } - }; - let network = self.network; let da_private_key = self.da_private_key.expect("No private key set"); // get address from a utxo @@ -420,11 +543,14 @@ impl BitcoinService { .require_network(network)?; let prefix = self.reveal_tx_prefix.clone(); + tokio::task::spawn_blocking(move || { // Since this is CPU bound work, we use spawn_blocking // to release the tokio runtime execution create_inscription_transactions( data, + sent_commits, + sent_reveals, da_private_key, utxo_context, address, @@ -438,97 +564,6 @@ impl BitcoinService { .map_err(|e| BitcoinServiceError::TransactionBuilderError(e.to_string())) } - async fn queue_transactions(&self, txs: Vec) { - let txs_len = txs.len(); - self.tx_queue.lock().await.extend(txs); - BM.transaction_queue_size.increment(txs_len as f64); - } - - pub(crate) async fn process_transaction_queue(&self) -> Result> { - match self.utxo_manager.mode { - UtxoSelectionMode::Chained => self.process_transaction_queue_chained().await, - UtxoSelectionMode::Oldest => self.process_transaction_queue_oldest_mode().await, - } - } - - pub(crate) async fn process_transaction_queue_oldest_mode(&self) -> Result> { - let mut queue = 
self.tx_queue.lock().await; - - let mut txids = Vec::new(); - let mut failed_txs = VecDeque::new(); - while let Some(tx) = queue.pop_front() { - info!( - "Processing transaction from queue. Commit: {} Reveal: {}", - tx.commit_txid(), - tx.reveal_txid() - ); - if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await { - debug!(?e, "Rejected by mempool"); - failed_txs.push_back(tx); - continue; - } - - match self.send_signed_transaction(&tx).await { - Ok(ids) => { - BM.transaction_queue_size.decrement(1); - txids.extend(ids) - } - Err(e) => { - error!(?e, "Error sending signed transaction"); - failed_txs.push_back(tx); - } - } - } - - *queue = failed_txs; - - // Update monitored tx status - if let Err(e) = self.monitoring.update_txs_status(&txids).await { - error!(?e, "Failed to update queued tx status"); - } - - Ok(txids) - } - - /// Send transaction out of the queue to DA until the first error. - /// Returns the successfully sent txs. - pub(crate) async fn process_transaction_queue_chained(&self) -> Result> { - let mut queue = self.tx_queue.lock().await; - - let mut txids = Vec::new(); - while let Some(tx) = queue.front() { - info!( - "Processing transaction from queue. 
Commit: {} Reveal: {}", - tx.commit_txid(), - tx.reveal_txid() - ); - if let Err(e) = self.test_mempool_accept(&tx.as_raw_txs()).await { - warn!(?e, "Rejected by mempool"); - break; - } - - match self.send_signed_transaction(tx).await { - Ok(ids) => { - queue.pop_front(); - BM.transaction_queue_size.decrement(1); - txids.extend(ids) - } - Err(e) => { - error!(?e, "Error sending signed transaction"); - // Break on first error and return successfully sent txids - break; - } - } - } - - // Update monitored tx status - if let Err(e) = self.monitoring.update_txs_status(&txids).await { - error!(?e, "Failed to update queued tx status"); - } - - Ok(txids) - } - pub(crate) async fn send_signed_transaction(&self, tx: &SignedTxPair) -> Result> { let raw_txs = tx.as_raw_txs(); let raw_txs_size_sum = raw_txs.iter().map(|tx| tx.len()).sum::() as f64; @@ -760,6 +795,43 @@ impl BitcoinService { }), } } + + /// Recover transaction from `SentTxs` txids + /// 1. Try first through monitoring service. + /// 2. If not found via monitoring service - following a restart - falls back to `get_transaction` RPC. + async fn recover_sent_transactions( + &self, + sent_txs: &SentTxs, + ) -> Result<(Vec, Vec)> { + let monitored_txs = self.monitoring.get_monitored_txs().await; + let recover_transaction = async |txid: &Txid| { + let tx = if let Some(monitored) = monitored_txs.get(txid).cloned() { + monitored.tx + } else { + self.client + .get_transaction(txid, None) + .await? + .transaction()? 
+ }; + Ok::(tx) + }; + + let mut commits = Vec::new(); + for txid in &sent_txs.commit { + let txid = Txid::from_byte_array(*txid); + let transaction = recover_transaction(&txid).await?; + commits.push(transaction) + } + + let mut reveals = Vec::new(); + for txid in &sent_txs.reveal { + let txid = Txid::from_byte_array(*txid); + let transaction = recover_transaction(&txid).await?; + reveals.push(transaction) + } + + Ok((commits, reveals)) + } } #[async_trait] @@ -1197,26 +1269,40 @@ impl DaService for BitcoinService { (relevant_txs, inclusion_proof, completeness_proof) } - #[instrument(level = "trace", skip_all)] + /// Submit a new job to the queue async fn send_transaction( &self, tx_request: DaTxRequest, - ) -> Result<::TransactionId> { - let queue = self.get_send_transaction_queue(); - let (tx, rx) = oneshot_channel(); - queue - .send(TxRequestWithNotifier { - tx_request, - notify: tx, - }) - .map_err(|_| BitcoinServiceError::ChannelSendError)?; - Ok(rx.await?.expect("Queue never sends error")) + ) -> Result<(Uuid, oneshot::Receiver>)> { + let (tx, rx) = oneshot::channel(); + let job_id = { + let job_service = self.job_service.lock().await; + + // TODO handle chaining job request + if self.utxo_manager.mode == UtxoSelectionMode::Chained { + let active_jobs = job_service.get_all_active_job_ids()?; + if !active_jobs.is_empty() { + return Err(BitcoinServiceError::PreviousJobInProgress); + } + } + job_service.submit_job(tx_request, tx)? 
+ }; + + // For now, notify on new job and process all of them in order as this is needed for utxo handling + self.job_notifier.notify_one(); + + Ok((job_id, rx)) } - fn get_send_transaction_queue( + async fn recover_existing_job_waiter( &self, - ) -> UnboundedSender> { - self.inscribes_queue.clone() + job_id: Uuid, + ) -> Result>> { + self.job_service + .lock() + .await + .recover_job_waiter(job_id) + .map_err(Into::into) } #[instrument(level = "trace", skip(self))] @@ -1350,7 +1436,7 @@ impl DaService for BitcoinService { /// Wrapper around Txid to be used in DaSpec. #[derive(PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)] -pub struct TxidWrapper(Txid); +pub struct TxidWrapper(pub(crate) Txid); impl From for [u8; 32] { fn from(val: TxidWrapper) -> Self { val.0.to_byte_array() diff --git a/crates/bitcoin-da/src/test_utils.rs b/crates/bitcoin-da/src/test_utils.rs index 6ece539a3f..a07de32d64 100644 --- a/crates/bitcoin-da/src/test_utils.rs +++ b/crates/bitcoin-da/src/test_utils.rs @@ -1,7 +1,9 @@ //! This module provides the implementation for sending separate chunk transactions with a specified fee rate. use bitcoin::hashes::Hash; -use sov_rollup_interface::da::{DaTxRequest, DataOnDa}; +use sov_rollup_interface::da::DataOnDa; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; +use uuid::Uuid; use crate::error::BitcoinServiceError; use crate::helpers::builders::body_builders::{DaTxs, RawTxData}; @@ -11,6 +13,13 @@ use crate::helpers::builders::test_utils::{ use crate::service::{split_proof, BitcoinService, Result}; impl BitcoinService { + /// Send a transaction to da and wait until its completion + pub async fn send_transaction_and_wait(&self, tx_request: DaTxRequest) -> Result { + let (job_id, rx) = self.send_transaction(tx_request).await?; + rx.await??; + Ok(job_id) + } + /// Sends chunks and aggregate as if they are of a Complete kind. 
pub async fn test_send_separate_chunk_transaction_with_fee_rate( &self, @@ -21,6 +30,7 @@ impl BitcoinService { let da_private_key = self.da_private_key.expect("No private key set"); + let sent_txids = Default::default(); match tx_request { DaTxRequest::ZKProof(zkproof) => { let mut txids = vec![]; @@ -33,7 +43,7 @@ impl BitcoinService { RawTxData::Chunks(chunks) => { for body in chunks { // get all available utxos that are not already spent - let utxos = self.utxo_manager.get_available_utxos().await?; + let utxos = self.utxo_manager.get_available_utxos(&sent_txids).await?; let utxos = utxos .into_iter() .filter(|utxo| { @@ -76,7 +86,7 @@ impl BitcoinService { } .unwrap(); - let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?; + let signed_txs = self.tx_signer.sign_da_txs(da_txs, 0).await?; reveal_chunks.push((txid, wtxid)); @@ -92,7 +102,7 @@ impl BitcoinService { borsh::to_vec(&aggregate).expect("Aggregate serialize must not fail"); // get all available utxos that are not already spent - let utxos = self.utxo_manager.get_available_utxos().await?; + let utxos = self.utxo_manager.get_available_utxos(&sent_txids).await?; let utxos = utxos .into_iter() .filter(|utxo| utxo.amount >= 50 * 10_u64.pow(8)) @@ -119,7 +129,7 @@ impl BitcoinService { ) .unwrap(); - let signed_txs = self.tx_signer.sign_da_txs(da_txs).await?; + let signed_txs = self.tx_signer.sign_da_txs(da_txs, 0).await?; txids.extend(self.send_signed_transaction(&signed_txs[0]).await?); } diff --git a/crates/bitcoin-da/src/tx_signer.rs b/crates/bitcoin-da/src/tx_signer.rs index cce86ce765..7ed87759f4 100644 --- a/crates/bitcoin-da/src/tx_signer.rs +++ b/crates/bitcoin-da/src/tx_signer.rs @@ -7,6 +7,7 @@ use bitcoin::consensus::encode; use bitcoin::{Transaction, Txid}; use bitcoincore_rpc::json::SignRawTransactionInput; use bitcoincore_rpc::{Client, RpcApi}; +use serde::{Deserialize, Serialize}; use tracing::trace; use crate::error::BitcoinServiceError; @@ -16,7 +17,7 @@ use 
crate::helpers::TransactionKind; pub(crate) type Result = std::result::Result; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct SignedTxWithId { hex: Vec, pub tx: Transaction, @@ -24,7 +25,7 @@ pub(crate) struct SignedTxWithId { } /// Pair of commit/reveal signed transactions -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct SignedTxPair { pub commit: SignedTxWithId, pub reveal: SignedTxWithId, @@ -70,7 +71,11 @@ impl TxSigner { Self { client } } - pub(crate) async fn sign_da_txs(&self, da_txs: DaTxs) -> Result> { + pub(crate) async fn sign_da_txs( + &self, + da_txs: DaTxs, + current_idx: usize, + ) -> Result> { let queued_txs = match da_txs { DaTxs::Complete { commit, reveal } => { vec![ @@ -104,8 +109,14 @@ impl TxSigner { commit, reveal, } => { - self.sign_chunked_transaction(commit_chunks, reveal_chunks, commit, reveal) - .await? + self.sign_chunked_transaction( + commit_chunks, + reveal_chunks, + commit, + reveal, + current_idx, + ) + .await? } }; @@ -153,6 +164,7 @@ impl TxSigner { reveal_chunks: Vec, commit: Transaction, reveal: TxWithId, + current_idx: usize, ) -> Result> { assert!(!commit_chunks.is_empty(), "Received empty chunks"); assert_eq!( @@ -180,7 +192,11 @@ impl TxSigner { let mut raw_txs = Vec::with_capacity(all_tx_map.len()); - for (commit, reveal) in commit_chunks.into_iter().zip(reveal_chunks) { + for (commit, reveal) in commit_chunks + .into_iter() + .zip(reveal_chunks) + .skip(current_idx) + { let mut inputs = vec![]; for input in commit.input.iter() { diff --git a/crates/bitcoin-da/src/utxo_manager.rs b/crates/bitcoin-da/src/utxo_manager.rs index aa33d181b1..385d7bfb98 100644 --- a/crates/bitcoin-da/src/utxo_manager.rs +++ b/crates/bitcoin-da/src/utxo_manager.rs @@ -4,21 +4,20 @@ //! - Chained: Sequential transaction chains //! 
- Oldest: Parallel chains using most-confirmed UTXOs -use std::collections::VecDeque; +use std::collections::HashSet; use std::sync::Arc; -use bitcoin::Amount; +use bitcoin::{Amount, Txid}; use bitcoincore_rpc::json::ListUnspentResultEntry; use bitcoincore_rpc::{Client, RpcApi}; use serde::{Deserialize, Serialize}; -use tokio::sync::Mutex; +use sov_db::schema::types::da_jobs::DaJobStatus; use crate::error::BitcoinServiceError; use crate::monitoring::MonitoringService; use crate::network_constants::NetworkConstants; use crate::service::Result; use crate::spec::utxo::UTXO; -use crate::tx_signer::SignedTxPair; use crate::REVEAL_OUTPUT_AMOUNT; /// UTXO selection strategy when queue has pending transactions. @@ -55,7 +54,6 @@ pub struct UtxoContext { pub(crate) struct UtxoManager { client: Arc, monitoring: Arc, - tx_queue: Arc>>, network_constants: NetworkConstants, pub mode: UtxoSelectionMode, } @@ -64,7 +62,6 @@ impl UtxoManager { pub fn new( client: Arc, monitoring: Arc, - tx_queue: Arc>>, network_constants: NetworkConstants, mode: UtxoSelectionMode, ) -> Self { @@ -73,14 +70,25 @@ impl UtxoManager { monitoring, network_constants, mode, - tx_queue, } } /// Returns filtered UTXOs and `prev_utxo`. - pub async fn prepare_context(&self) -> Result { - let available_utxos = self.get_available_utxos().await?; - let prev_utxo = self.select_prev_utxo(&available_utxos).await?; + pub async fn prepare_context( + &self, + job_status: &DaJobStatus, + previous_job_in_progress: bool, + sent_txids: &HashSet, + ) -> Result { + let available_utxos = self.get_available_utxos(sent_txids).await?; + + let prev_utxo = match job_status { + DaJobStatus::InProgress => None, // Will use previous reveal utxo in create_inscription_type_1 + _ => { + self.select_prev_utxo(&available_utxos, previous_job_in_progress) + .await? 
+ } + }; Ok(UtxoContext { available_utxos, @@ -94,28 +102,28 @@ impl UtxoManager { /// If queue has pending txs: /// - Chained mode: returns Err(BitcoinServiceError::QueueNotEmpty) /// - Oldest mode: uses UTXO with highest number of confirmation to start new chain - pub(crate) async fn select_prev_utxo(&self, available_utxos: &[UTXO]) -> Result> { + pub(crate) async fn select_prev_utxo( + &self, + available_utxos: &[UTXO], + previous_job_in_progress: bool, + ) -> Result> { let prev_utxo = self.get_prev_utxo().await; - if self.tx_queue.lock().await.is_empty() { + if !previous_job_in_progress { return Ok(prev_utxo); } match self.mode { UtxoSelectionMode::Chained => { // Prevent UTXO conflicts when queue is not empty and running UtxoSelectionMode::Chained mode - Err(BitcoinServiceError::QueueNotEmpty) + Err(BitcoinServiceError::PreviousJobInProgress) } - UtxoSelectionMode::Oldest => Ok(if prev_utxo.is_some() { - // Latest monitored TX has been successfully accepted to mempool and can be used as starting point for another utxo chain - prev_utxo - } else { - // Latest monitored TX has `Queued` status and internal `get_tx_out` errors. + UtxoSelectionMode::Oldest => Ok( // Get UTXO with most confirmations to start new chain available_utxos .iter() .max_by_key(|utxo| utxo.confirmations) - .cloned() - }), + .cloned(), + ), } } @@ -134,7 +142,10 @@ impl UtxoManager { } /// Gets available UTXOs from `list_unspent` RPC, and filter by mode. 
- pub(crate) async fn get_available_utxos(&self) -> Result> { + pub(crate) async fn get_available_utxos( + &self, + sent_txids: &HashSet, + ) -> Result> { let utxos = self .client .list_unspent(Some(0), None, None, None, None) @@ -145,7 +156,7 @@ impl UtxoManager { let filtered_utxos = match self.mode { UtxoSelectionMode::Chained => self.chained_mode_filter(utxos).await, - UtxoSelectionMode::Oldest => self.oldest_mode_filter(utxos).await, + UtxoSelectionMode::Oldest => self.oldest_mode_filter(utxos, sent_txids).await, }; if filtered_utxos.is_empty() { @@ -176,21 +187,11 @@ impl UtxoManager { } /// Filters UTXOs for Oldest mode. - async fn oldest_mode_filter(&self, utxos: Vec) -> Vec { - let txids = self - .tx_queue - .lock() - .await - .iter() - .flat_map(|tx| { - tx.commit - .tx - .input - .iter() - .map(|input| input.previous_output.txid) - }) - .collect::>(); - + async fn oldest_mode_filter( + &self, + utxos: Vec, + sent_txids: &HashSet, + ) -> Vec { // When running in UtxoSelectionMode::Oldest, we're creating multiple utxos chain in parallel // to be able to send multiple proofs in the same block without hitting mempool policy limits. 
// To make sure there are no conflicts between parallel utxos chain, @@ -203,7 +204,7 @@ impl UtxoManager { && utxo.safe && utxo.amount > Amount::from_sat(REVEAL_OUTPUT_AMOUNT) // Remove utxo already in use by queued txs - && !txids.contains(&utxo.txid) + && !sent_txids.contains(&utxo.txid) // Only keep finalized change output && (utxo.vout == 0 || utxo.confirmations as u64 >= self.network_constants.finality_depth) diff --git a/crates/common/src/rpc/server.rs b/crates/common/src/rpc/server.rs index 3659ef7ec7..ad52955514 100644 --- a/crates/common/src/rpc/server.rs +++ b/crates/common/src/rpc/server.rs @@ -66,6 +66,7 @@ pub fn start_rpc_server( return; } }; + if let Some(channel) = channel { if let Err(e) = channel.send(bound_address) { error!("Could not send bound_address {}: {}", bound_address, e); diff --git a/crates/prover-services/src/parallel.rs b/crates/prover-services/src/parallel.rs index 1cda449ac9..e522cc6bcf 100644 --- a/crates/prover-services/src/parallel.rs +++ b/crates/prover-services/src/parallel.rs @@ -3,8 +3,7 @@ use std::time::Instant; use anyhow::anyhow; use rand::Rng; -use sov_rollup_interface::da::DaTxRequest; -use sov_rollup_interface::services::da::DaService; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_rollup_interface::zk::{Proof, ProofWithJob, ReceiptType, ZkvmHost}; use tokio::sync::{oneshot, OwnedSemaphorePermit, Semaphore}; use tracing::{debug, error, info, instrument, warn}; @@ -13,6 +12,9 @@ use uuid::Uuid; use crate::metrics::PARALLEL_PROVER_METRICS; use crate::{ProofData, ProofGenMode, ProofWithDuration}; +type DaJobWaiter = + oneshot::Receiver::TransactionId, ::Error>>; + /// Prover service capable of invoking the zkVM proving sessions in parallel. pub struct ParallelProverService where @@ -207,12 +209,8 @@ where } /// Submits the zk proof to the DA service, returning transaction id. 
- #[instrument(name = "ParallelProverService", skip_all, fields(job_id = _job_id.to_string()))] - pub async fn submit_proof( - &self, - proof: Proof, - _job_id: Uuid, - ) -> anyhow::Result<::TransactionId> { + #[instrument(name = "ParallelProverService", skip_all)] + pub async fn submit_proof(&self, proof: Proof) -> anyhow::Result<(Uuid, DaJobWaiter)> { let tx_request = DaTxRequest::ZKProof(proof); info!("Submitting proof to DA service"); self.da_service @@ -221,16 +219,26 @@ where .map_err(|e| anyhow::anyhow!(e)) } - // Only used in tests - pub async fn submit_proofs( + /// Submits the zk proof by id to the DA service, returning transaction id. + #[instrument(name = "ParallelProverService", skip_all)] + pub async fn submit_proof_by_id( &self, - proofs: Vec, - ) -> anyhow::Result::TransactionId, Proof)>> { + proof_id: Uuid, + ) -> anyhow::Result<(Uuid, DaJobWaiter)> { + let tx_request = DaTxRequest::StoredProof(proof_id); + info!("Submitting proof to DA service"); + self.da_service + .send_transaction(tx_request) + .await + .map_err(|e| anyhow::anyhow!(e)) + } + + // Only used in tests + pub async fn submit_proofs(&self, proofs: Vec) -> anyhow::Result> { let mut tx_and_proof = Vec::with_capacity(proofs.len()); - let job_id = Uuid::nil(); for proof in proofs { - let tx_id = self.submit_proof(proof.clone(), job_id).await?; - tx_and_proof.push((tx_id, proof)); + self.submit_proof(proof.clone()).await?; + tx_and_proof.push(proof); } Ok(tx_and_proof) } @@ -240,6 +248,14 @@ where let vm = self.vm.clone(); vm.start_session_recovery() } + + /// Used for recovery + pub async fn get_existing_da_job_waiter( + &self, + da_job_id: Uuid, + ) -> Result, ::Error> { + self.da_service.recover_existing_job_waiter(da_job_id).await + } } /// Runs the zkVM proving session. Decides on whether to produce a real proof or a fake proof based on the proof mode. 
diff --git a/crates/prover-services/tests/prover_tests.rs b/crates/prover-services/tests/prover_tests.rs index fcc8fe3fa7..a3d702b6b8 100644 --- a/crates/prover-services/tests/prover_tests.rs +++ b/crates/prover-services/tests/prover_tests.rs @@ -25,7 +25,7 @@ async fn test_successful_prover_execution() { let header_hash = MockHash::from([0; 32]); // Spawn mock proving in the background - let (id, rx) = start_proof(&prover_service, header_hash).await; + let (_, rx) = start_proof(&prover_service, header_hash).await; // Signal finish to 1st proof assert!(vm.finish_next_proof()); @@ -36,7 +36,7 @@ async fn test_successful_prover_execution() { let hash_from_proof = extract_output_header(&proof.proof); assert_eq!(hash_from_proof, header_hash); - prover_service.submit_proof(proof.proof, id).await.unwrap(); + prover_service.submit_proof(proof.proof).await.unwrap(); } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/sequencer/src/commitment/service.rs b/crates/sequencer/src/commitment/service.rs index 9abdbc76bc..6d204d44d8 100644 --- a/crates/sequencer/src/commitment/service.rs +++ b/crates/sequencer/src/commitment/service.rs @@ -14,11 +14,11 @@ use sov_db::ledger_db::SequencerLedgerOps; use sov_db::schema::types::L2BlockNumber; use sov_modules_api::WorkingSet; use sov_prover_storage_manager::ProverStorageManager; -use sov_rollup_interface::da::{BlockHeaderTrait, DaTxRequest, SequencerCommitment}; -use sov_rollup_interface::services::da::{DaService, TxRequestWithNotifier}; +use sov_rollup_interface::da::{BlockHeaderTrait, SequencerCommitment}; +use sov_rollup_interface::services::da::{DaService, DaTxRequest}; use sov_state::ProverStorage; use tokio::select; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::mpsc; use tracing::{debug, error, info, instrument, warn}; use super::controller::CommitmentController; @@ -220,12 +220,12 @@ where debug!("Sequencer: submitting commitment: {:?}", commitment); let tx_request = 
DaTxRequest::SequencerCommitment(commitment.clone()); - let (notify, rx) = oneshot::channel(); - let request = TxRequestWithNotifier { tx_request, notify }; - self.da_service - .get_send_transaction_queue() - .send(request) - .map_err(|_| anyhow!("Bitcoin service already stopped!"))?; + + let (da_job_id, rx) = self + .da_service + .send_transaction(tx_request) + .await + .map_err(|e| anyhow!("Failed to submit job to DA {e}"))?; info!( "Sent commitment to DA queue. L2 range: #{}-{}, index: {}", @@ -235,10 +235,9 @@ where let start = Instant::now(); let ledger_db = self.ledger_db.clone(); - let _tx_id = rx + let _txid = rx .await - .map_err(|_| anyhow!("DA service is dead!"))? - .map_err(|_| anyhow!("Send transaction cannot fail"))?; + .map_err(|_| anyhow!("DA notification channel closed"))?; SM.send_commitment_execution.record( Instant::now() @@ -252,7 +251,10 @@ where ledger_db.delete_state_diff_by_range(commitment_range)?; - info!("New commitment. L2 range: #{}-{}", l2_start.0, l2_end.0); + info!( + "New commitment. 
L2 range: #{}-{}, index: {}, da job id {da_job_id}", + l2_start.0, l2_end.0, commitment.index + ); Ok(()) } diff --git a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml index 8f89e5c0b0..a445cb92a9 100644 --- a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml +++ b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml @@ -22,7 +22,9 @@ tokio = { workspace = true, optional = true } rusqlite = { version = "0.34.0", features = ["bundled"], optional = true } serde_json = { workspace = true, optional = true } tracing = { workspace = true, optional = true, features = ["attributes"]} +uuid = { workspace = true, optional = true } +sov-db = { path = "../../full-node/db/sov-db", optional = true } sov-rollup-interface = { path = "../../rollup-interface" } [dev-dependencies] @@ -35,5 +37,7 @@ native = [ "dep:serde_json", "dep:tokio", "dep:tracing", + "dep:uuid", "sov-rollup-interface/native", + "sov-db" ] diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index 2ddefaa84d..7e3fef181b 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -5,15 +5,16 @@ use std::time::Duration; use async_trait::async_trait; use borsh::BorshDeserialize; use sha2::Digest; +use sov_db::ledger_db::{DaLedgerOps, LedgerDB}; use sov_rollup_interface::da::{ - BlobReaderTrait, BlockHeaderTrait, DaSpec, DaTxRequest, DataOnDa, SequencerCommitment, Time, + BlobReaderTrait, BlockHeaderTrait, DaSpec, DataOnDa, SequencerCommitment, Time, }; -use sov_rollup_interface::services::da::{DaService, SlotData, TxRequestWithNotifier}; +use sov_rollup_interface::services::da::{DaService, DaTxRequest, SlotData}; use sov_rollup_interface::zk::Proof; -use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; -use tokio::sync::{broadcast, Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; +use tokio::sync::{broadcast, oneshot, 
Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use tokio::time; use tracing::instrument::Instrument; +use uuid::Uuid; use crate::db_connector::DbConnector; use crate::types::{MockAddress, MockBlob, MockBlock, MockDaVerifier}; @@ -76,6 +77,7 @@ pub struct MockDaService { finalized_header_sender: broadcast::Sender, wait_attempts: usize, planned_fork: Arc>>, + ledger_db: Option, } impl MockDaService { @@ -84,6 +86,18 @@ impl MockDaService { Self::with_finality(sequencer_da_address, 0, db_path) } + /// Creates a new [`MockDaService`] with instant finality and access to LedgerDB for stored proof related functionalities. + pub fn new_with_ledger_db( + sequencer_da_address: MockAddress, + db_path: &Path, + ledger_db: LedgerDB, + ) -> Self { + let mut service = Self::with_finality(sequencer_da_address, 0, db_path); + + service.ledger_db = Some(ledger_db); + service + } + /// Create a new [`MockDaService`] with given finality. #[tracing::instrument(name = "MockDA")] pub fn with_finality( @@ -106,6 +120,7 @@ impl MockDaService { finalized_header_sender: tx, wait_attempts: 100_0000, planned_fork: Arc::new(Mutex::new(None)), + ledger_db: None, } } @@ -430,13 +445,28 @@ impl DaService for MockDaService { async fn send_transaction( &self, tx_request: DaTxRequest, - ) -> Result { + ) -> Result< + ( + Uuid, + oneshot::Receiver>, + ), + Self::Error, + > { let blob = match tx_request { DaTxRequest::ZKProof(proof) => { tracing::debug!("Adding a zkproof"); let req = DataOnDa::Complete(proof); borsh::to_vec(&req).unwrap() } + DaTxRequest::StoredProof(proof_id) => { + let proof = self + .ledger_db + .as_ref() + .unwrap() + .get_proof_by_proof_id(proof_id)?; + let req = DataOnDa::Complete(proof); + borsh::to_vec(&req).unwrap() + } DaTxRequest::SequencerCommitment(seq_comm) => { tracing::debug!("Adding a sequencer commitment"); let req = DataOnDa::SequencerCommitment(seq_comm); @@ -450,21 +480,10 @@ impl DaService for MockDaService { }; let blocks = self.blocks.lock().await; let _ = 
self.add_blob(&blocks, blob, Default::default())?; - Ok(MockHash([0; 32])) - } + let (tx, rx) = oneshot::channel(); - fn get_send_transaction_queue( - &self, - ) -> UnboundedSender> { - let (tx, mut rx) = unbounded_channel::>(); - let this = self.clone(); - tokio::spawn(async move { - while let Some(req) = rx.recv().await { - let res = this.send_transaction(req.tx_request).await; - let _ = req.notify.send(res); - } - }); - tx + let _ = tx.send(Ok(MockHash([0; 32]))); + Ok((Uuid::nil(), rx)) } async fn get_fee_rate(&self) -> Result { @@ -500,6 +519,13 @@ impl DaService for MockDaService { height: block.header.height, } } + + async fn recover_existing_job_waiter( + &self, + _job_id: Uuid, + ) -> Result>, Self::Error> { + unimplemented!() + } } fn hash_to_array(bytes: &[u8]) -> [u8; 32] { @@ -583,8 +609,8 @@ mod tests { let block_3_before = da.get_block_at(3).await.unwrap(); // Disabling this check because our modified mock da creates blocks when a transaction is sent - // let result = da.get_block_at(4).await; - // assert!(result.is_err()); + let result = da.get_block_at(4).await; + assert!(result.is_err()); let block_1_after = da.get_block_at(1).await.unwrap(); let block_2_after = da.get_block_at(2).await.unwrap(); @@ -613,6 +639,8 @@ mod tests { } mod reo4g_control { + use sov_rollup_interface::services::da::DaTxRequest; + use super::*; use crate::{MockAddress, MockDaService}; diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index a7c0b4ccec..c9c6834fc6 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -2,10 +2,12 @@ use std::ops::RangeInclusive; use std::path::Path; use std::sync::Arc; +use anyhow::Context; use rocksdb::{ReadOptions, WriteBatch}; use sov_rollup_interface::block::L2Block; use sov_rollup_interface::da::SequencerCommitment; use sov_rollup_interface::fork::{Fork, 
ForkMigration}; +use sov_rollup_interface::services::da::DaTxRequest; use sov_rollup_interface::stf::StateDiff; use sov_rollup_interface::zk::{Proof, ProvingSessionInfo, StorageRootHash}; use sov_schema_db::{ScanDirection, Schema, SchemaBatch, SchemaIterator, SeekKeyEncoder, DB}; @@ -17,6 +19,7 @@ use crate::rocks_db_config::RocksdbConfig; use crate::schema::tables::TestTableNew; use crate::schema::tables::{ CommitmentIndicesByJobId, CommitmentIndicesByL1, CommitmentMerkleRoots, CommitmentsByNumber, + DaJobIdByProvingJobId, DaJobProgressById, DaJobStatusIndex, DaTxRequestByJobId, ExecutedMigrations, JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, L2GenesisStateRoot, L2RangeByL1Height, L2StatusHeights, LastPrunedBlock, LightClientProofBySlotNumber, MempoolTxs, PendingBonsaiSessionByJobId, PendingBoundlessSessionByJobId, PendingL1SubmissionJobs, @@ -28,6 +31,7 @@ use crate::schema::tables::{ use crate::schema::types::batch_proof::{ StoredBatchProof, StoredBatchProofOutput, StoredVerifiedProof, }; +use crate::schema::types::da_jobs::JobProgress; use crate::schema::types::job_status::JobStatus; use crate::schema::types::l2_block::{StoredL2Block, StoredTransaction}; use crate::schema::types::light_client_proof::{ @@ -740,6 +744,22 @@ impl BatchProverLedgerOps for LedgerDB { JobStatus::Proving } } + + fn set_da_job_id_by_prover_job_id( + &self, + proving_job_id: Uuid, + da_job_id: Uuid, + ) -> anyhow::Result<()> { + let mut schema_batch = SchemaBatch::new(); + + schema_batch.put::(&proving_job_id, &da_job_id)?; + + self.db.write_schemas(schema_batch) + } + + fn get_da_job_id_by_prover_job_id(&self, proving_job_id: Uuid) -> anyhow::Result> { + self.db.get::(&proving_job_id) + } } impl BonsaiLedgerOps for LedgerDB { @@ -1045,3 +1065,84 @@ impl ForkMigration for LedgerDB { Ok(()) } } + +impl DaLedgerOps for LedgerDB { + fn submit_job( + &self, + job_id: Uuid, + da_tx_request: &DaTxRequest, + progress: &JobProgress, + ) -> anyhow::Result<()> { + let mut batch = 
SchemaBatch::new();
+        let status = progress.status.as_u8();
+
+        batch.put::<DaTxRequestByJobId>(&job_id, da_tx_request)?;
+        batch.put::<DaJobProgressById>(&job_id, progress)?;
+        batch.put::<DaJobStatusIndex>(&(status, job_id), &())?;
+
+        self.db.write_schemas(batch)?;
+        Ok(())
+    }
+
+    fn get_job_request(&self, job_id: &Uuid) -> anyhow::Result<Option<DaTxRequest>> {
+        self.db.get::<DaTxRequestByJobId>(job_id)
+    }
+
+    fn upsert_progress(&self, progress: &JobProgress) -> anyhow::Result<()> {
+        let mut batch = SchemaBatch::new();
+
+        batch.put::<DaJobProgressById>(&progress.job_id, progress)?;
+
+        self.db.write_schemas(batch)?;
+        Ok(())
+    }
+
+    fn upsert_progress_new_status(
+        &self,
+        progress: &JobProgress,
+        previous_status: u8,
+    ) -> anyhow::Result<()> {
+        let mut batch = SchemaBatch::new();
+
+        let job_id = progress.job_id;
+        let new_status = progress.status.as_u8();
+
+        batch.put::<DaJobProgressById>(&job_id, progress)?;
+        if previous_status != new_status {
+            batch.delete::<DaJobStatusIndex>(&(previous_status, job_id))?;
+            batch.put::<DaJobStatusIndex>(&(new_status, job_id), &())?;
+        }
+
+        self.db.write_schemas(batch)?;
+        Ok(())
+    }
+
+    fn get_progress(&self, job_id: &Uuid) -> anyhow::Result<Option<JobProgress>> {
+        self.db.get::<DaJobProgressById>(job_id)
+    }
+
+    fn get_job_ids_by_status(&self, status: u8) -> anyhow::Result<Vec<Uuid>> {
+        let mut iter = self.db.iter::<DaJobStatusIndex>()?;
+
+        iter.seek(&(status, Uuid::nil()))?;
+
+        let mut job_ids = Vec::new();
+        for item in iter {
+            let ((item_status, job_id), _) = item?.into_tuple();
+
+            if item_status != status {
+                break;
+            }
+
+            job_ids.push(job_id);
+        }
+        Ok(job_ids)
+    }
+
+    fn get_proof_by_proof_id(&self, proof_id: Uuid) -> anyhow::Result<Proof> {
+        self.db
+            .get::<ProofByJobId>(&proof_id)?
+            .map(|stored_batch_proof| stored_batch_proof.proof)
+            .context("Failed to retrieve proof by id")
+    }
+}
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs
index 6b01542d06..6ff613dfaa 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/traits.rs
@@ -5,6 +5,7 @@ use std::sync::Arc;
 use anyhow::Result;
 use sov_rollup_interface::block::L2Block;
 use sov_rollup_interface::da::SequencerCommitment;
+use sov_rollup_interface::services::da::DaTxRequest;
 use sov_rollup_interface::stf::StateDiff;
 use sov_rollup_interface::zk::{Proof, ProvingSessionInfo, StorageRootHash};
 use sov_schema_db::SchemaIterator;
@@ -12,6 +13,7 @@ use uuid::Uuid;
 
 use crate::schema::tables::{PendingProofs, PendingSequencerCommitments};
 use crate::schema::types::batch_proof::{StoredBatchProof, StoredBatchProofOutput};
+use crate::schema::types::da_jobs::JobProgress;
 use crate::schema::types::job_status::JobStatus;
 use crate::schema::types::l2_block::StoredL2Block;
 use crate::schema::types::light_client_proof::{
@@ -285,6 +287,12 @@ pub trait BatchProverLedgerOps: SharedLedgerOps + Send + Sync {
 
     /// Get job status (non-existent job IS RUNNING)
     fn job_status(&self, id: Uuid) -> JobStatus;
+
+    /// Set a DA job_id by prover job_id
+    fn set_da_job_id_by_prover_job_id(&self, proving_job_id: Uuid, da_job_id: Uuid) -> Result<()>;
+
+    /// Get DA job_id by prover job_id
+    fn get_da_job_id_by_prover_job_id(&self, proving_job_id: Uuid) -> Result<Option<Uuid>>;
 }
 
 /// Light client prover ledger operations
@@ -363,6 +371,36 @@ pub trait SequencerLedgerOps: SharedLedgerOps {
     fn get_mempool_txs(&self) -> anyhow::Result<Vec<(Vec<u8>, Vec<u8>)>>;
 }
 
+/// Bitcoin DA ledger operations
+pub trait DaLedgerOps {
+    /// Store a job to db
+    fn submit_job(
+        &self,
+        job_id: Uuid,
+        job: &DaTxRequest,
+        progress: &JobProgress,
+    ) -> anyhow::Result<()>;
+
+    /// Get a DA job request by id
+    fn get_job_request(&self, job_id: &Uuid) -> Result<Option<DaTxRequest>>;
+
+    /// Upsert a DA job progress
+    fn upsert_progress(&self, progress: &JobProgress) -> Result<()>;
+
+    /// Upsert a DA job progress with a status change
+    fn upsert_progress_new_status(&self, progress: &JobProgress, previous_status: u8)
+        -> Result<()>;
+
+    /// Get a DA job progress by id
+    fn get_progress(&self, job_id: &Uuid) -> Result<Option<JobProgress>>;
+
+    /// Get all job ids for a specific status
+    fn get_job_ids_by_status(&self, status: u8) -> Result<Vec<Uuid>>;
+
+    /// Get stored proof by proof_id
+    fn get_proof_by_proof_id(&self, proof_id: Uuid) -> Result<Proof>;
+}
+
 /// Test ledger operations
 #[cfg(test)]
 pub trait TestLedgerOps {
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
index 610c44a815..4c365fee57 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
@@ -13,6 +13,7 @@ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
 use jmt::storage::{NibblePath, Node, NodeKey, StaleNodeIndex};
 use jmt::Version;
 use sov_rollup_interface::da::SequencerCommitment;
+use sov_rollup_interface::services::da::DaTxRequest;
 use sov_rollup_interface::stf::StateDiff;
 use sov_rollup_interface::zk::{Proof, ProvingSessionInfo};
 use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec};
@@ -20,6 +21,7 @@ use sov_schema_db::{CodecError, SeekKeyEncoder};
 use uuid::Uuid;
 
 use super::types::batch_proof::{StoredBatchProof, StoredVerifiedProof};
+use super::types::da_jobs::JobProgress;
 use super::types::l2_block::StoredL2Block;
 use super::types::light_client_proof::StoredLightClientProof;
 use super::types::{
@@ -43,6 +45,9 @@ pub const STATE_TABLES: &[&str] = &[
 /// Note: Please keep the list sorted alphabetically
 pub const SEQUENCER_LEDGER_TABLES: &[&str] = &[
     CommitmentsByNumber::table_name(),
+    DaTxRequestByJobId::table_name(),
+    
DaJobProgressById::table_name(), + DaJobStatusIndex::table_name(), ExecutedMigrations::table_name(), L2BlockByHash::table_name(), L2BlockByNumber::table_name(), @@ -93,6 +98,10 @@ pub const FULL_NODE_LEDGER_TABLES: &[&str] = &[ pub const BATCH_PROVER_LEDGER_TABLES: &[&str] = &[ CommitmentIndicesByJobId::table_name(), CommitmentIndicesByL1::table_name(), + DaTxRequestByJobId::table_name(), + DaJobIdByProvingJobId::table_name(), + DaJobProgressById::table_name(), + DaJobStatusIndex::table_name(), ExecutedMigrations::table_name(), JobIdOfCommitment::table_name(), L2BlockByHash::table_name(), @@ -146,6 +155,10 @@ pub const LEDGER_TABLES: &[&str] = &[ CommitmentIndicesByL1::table_name(), CommitmentMerkleRoots::table_name(), CommitmentsByNumber::table_name(), + DaTxRequestByJobId::table_name(), + DaJobIdByProvingJobId::table_name(), + DaJobProgressById::table_name(), + DaJobStatusIndex::table_name(), ExecutedMigrations::table_name(), JobIdOfCommitment::table_name(), L2BlockByHash::table_name(), @@ -515,6 +528,26 @@ define_table_with_seek_key_codec!( (PendingProofs) (u32, u32) => (Proof, L1Height) ); +define_table_with_seek_key_codec!( + /// DaTxRequest by uuid + (DaTxRequestByJobId) Uuid => DaTxRequest +); + +define_table_with_seek_key_codec!( + /// Da job progress by uuid + (DaJobProgressById) Uuid => JobProgress +); + +define_table_with_seek_key_codec!( + /// Index by (status, jobid) + (DaJobStatusIndex) (u8, Uuid) => () +); + +define_table_with_seek_key_codec!( + /// DA job id by proving job id + (DaJobIdByProvingJobId) Uuid => Uuid +); + #[cfg(test)] define_table_with_seek_key_codec!( /// Test table old diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs new file mode 100644 index 0000000000..0451cc5b56 --- /dev/null +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/da_jobs.rs @@ -0,0 +1,95 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use 
serde::{Deserialize, Serialize};
+use uuid::Uuid;
+
+/// Unique job id using uuidv7 for ordering by creation time
+pub type JobId = Uuid;
+
+/// Job status representing the current state of transaction processing
+#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize, PartialEq)]
+pub enum DaJobStatus {
+    /// Job is queued and waiting to be processed.
+    Pending,
+    /// Job is in progress. None or some of its txs have been sent to DA.
+    InProgress,
+    /// Job completed successfully. All its txs have been sent to DA.
+    Completed,
+    /// Job was cancelled before completion.
+    Cancelled,
+    /// Job failed with error.
+    Failed {
+        /// Error associated with the failure.
+        error: String,
+    },
+}
+
+impl DaJobStatus {
+    /// u8 representation of `DaJobStatus`
+    pub fn as_u8(&self) -> u8 {
+        match self {
+            DaJobStatus::Pending => 0,
+            DaJobStatus::InProgress => 1,
+            DaJobStatus::Completed => 2,
+            DaJobStatus::Cancelled => 3,
+            DaJobStatus::Failed { .. } => 4,
+        }
+    }
+}
+
+/// Track sent chunk for partial sending and recovery
+#[derive(Debug, Default, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+pub struct SentTxs {
+    /// Sent commit txids
+    pub commit: Vec<[u8; 32]>,
+    /// Sent reveal txids
+    pub reveal: Vec<[u8; 32]>,
+}
+
+impl SentTxs {
+    /// Number of sent commit/reveal pairs
+    pub fn count(&self) -> usize {
+        self.reveal.len()
+    }
+
+    /// Extend with sent commit and reveal chunks
+    pub fn extend(&mut self, commits: Vec<[u8; 32]>, reveals: Vec<[u8; 32]>) {
+        self.commit.extend(commits);
+        self.reveal.extend(reveals);
+    }
+
+    /// Return a default SentTxs with empty vectors
+    pub fn new() -> Self {
+        Self::default()
+    }
+}
+
+/// Tracks progress of a job including sent transactions for recovery.
+///
+/// This state is persisted to the database and updated as transactions
+/// are sent to Bitcoin DA.
+#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +pub struct JobProgress { + /// Job id as uuidv7 + pub job_id: JobId, + /// Current job status + pub status: DaJobStatus, + /// Sent commit/reveal txs for tracking, partial sending and recovery + pub sent_txs: SentTxs, + /// Last update timestamp + pub last_updated: u64, + /// Last recoverable error message + pub last_error: Option, +} + +impl JobProgress { + /// Creates a new `JobProgress` + pub fn new(job_id: JobId, last_updated: u64) -> Self { + Self { + job_id, + status: DaJobStatus::Pending, + sent_txs: SentTxs::new(), + last_updated, + last_error: None, + } + } +} diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs index 604b9852c0..662546c929 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types/mod.rs @@ -3,6 +3,8 @@ use sov_rollup_interface::zk::{Proof, ReceiptType}; /// Batch proof related storage types pub mod batch_proof; +/// DA job related storage types +pub mod da_jobs; /// Job status pub mod job_status; /// L2 block related storage types diff --git a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs index f94b83bf01..9c5c538678 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs @@ -108,6 +108,7 @@ pub trait RollupBlueprint: Sized + Send + Sync { require_wallet_check: bool, task_manager: TaskExecutor, network: Network, + ledger_db: LedgerDB, ) -> Result, anyhow::Error>; /// Creates instance of [`ProverService`]. 
diff --git a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs index c61391fca4..d95a2969e5 100644 --- a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs @@ -1,18 +1,35 @@ //! The da module defines traits used by the full node to interact with the DA layer. +#[cfg(feature = "native")] +use borsh::{BorshDeserialize, BorshSerialize}; use serde::de::DeserializeOwned; use serde::Serialize; #[cfg(feature = "native")] -use tokio::sync::mpsc::UnboundedSender; +use tokio::sync::oneshot::{self, Sender as OneshotSender}; #[cfg(feature = "native")] -use tokio::sync::oneshot::Sender as OneshotSender; +use uuid::Uuid; use crate::da::BlockHeaderTrait; #[cfg(feature = "native")] -use crate::da::{DaSpec, DaTxRequest, DaVerifier, SequencerCommitment}; +use crate::da::{BatchProofMethodId, DaSpec, DaVerifier, SequencerCommitment}; #[cfg(feature = "native")] use crate::zk::Proof; +/// Transaction request to send to the DA queue. 
+#[cfg(feature = "native")] +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize)] +pub enum DaTxRequest { + /// A commitment from the sequencer + SequencerCommitment(SequencerCommitment), + /// Or a zk proof and state diff + ZKProof(Proof), + /// Or a job id for a stored proof + StoredProof(Uuid), + /// Batch proof method id update for light client + BatchProofMethodId(BatchProofMethodId), +} + /// This type represents a queued request to send_transaction #[cfg(feature = "native")] pub struct TxRequestWithNotifier { @@ -107,14 +124,20 @@ pub trait DaService: Send + Sync + 'static { async fn send_transaction( &self, tx_request: DaTxRequest, - ) -> Result; - - /// A tx part of the queue to send transactions in order - fn get_send_transaction_queue( + ) -> Result< + ( + Uuid, + oneshot::Receiver>, + ), + Self::Error, + >; + + /// Recover an ongoing da job sending session + /// Returns the receiver if available + async fn recover_existing_job_waiter( &self, - ) -> UnboundedSender> { - unimplemented!() - } + job_id: Uuid, + ) -> Result>, Self::Error>; /// Returns fee rate per byte on DA layer. async fn get_fee_rate(&self) -> Result; diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs index bdb68e5aa1..a2e60103f8 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs @@ -101,18 +101,6 @@ impl core::cmp::Ord for SequencerCommitment { } } -/// Transaction request to send to the DA queue. 
-#[allow(clippy::large_enum_variant)] -#[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize)] -pub enum DaTxRequest { - /// A commitment from the sequencer - SequencerCommitment(SequencerCommitment), - /// Or a zk proof and state diff - ZKProof(Proof), - /// Batch proof method id update for light client - BatchProofMethodId(BatchProofMethodId), -} - /// Data written to DA and read from DA must be the borsh serialization of this enum #[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize)] #[allow(clippy::large_enum_variant)] diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs index 65abe59fcf..135a4a8b91 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs @@ -28,7 +28,7 @@ pub mod light_client_proof; pub type Proof = Vec; #[cfg(feature = "native")] -#[derive(Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] /// Information about a local prover's execution. 
pub struct LocalProvingSessionInfo { /// Segments count diff --git a/crates/storage-ops/src/rollback/node/batch_prover.rs b/crates/storage-ops/src/rollback/node/batch_prover.rs index a9668836db..74006a5910 100644 --- a/crates/storage-ops/src/rollback/node/batch_prover.rs +++ b/crates/storage-ops/src/rollback/node/batch_prover.rs @@ -2,10 +2,10 @@ use std::collections::HashMap; use std::sync::Arc; use sov_db::schema::tables::{ - CommitmentIndicesByJobId, CommitmentIndicesByL1, JobIdOfCommitment, L2BlockByHash, - L2BlockByNumber, PendingL1SubmissionJobs, ProofByJobId, ProverLastScannedSlot, - ProverPendingCommitments, ProverStateDiffs, SequencerCommitmentByIndex, - ShortHeaderProofBySlotHash, SlotByHash, + CommitmentIndicesByJobId, CommitmentIndicesByL1, DaJobIdByProvingJobId, DaJobProgressById, + DaJobStatusIndex, DaTxRequestByJobId, JobIdOfCommitment, L2BlockByHash, L2BlockByNumber, + PendingL1SubmissionJobs, ProofByJobId, ProverLastScannedSlot, ProverPendingCommitments, + ProverStateDiffs, SequencerCommitmentByIndex, ShortHeaderProofBySlotHash, SlotByHash, }; use sov_db::schema::types::{L2BlockNumber, SlotNumber}; use sov_schema_db::{ScanDirection, SchemaBatch, DB}; @@ -190,6 +190,54 @@ impl BatchProverLedgerRollback { Ok(cache) } + + fn rollback_da_jobs(&self, mut rollback_result: RollbackResult) -> Result { + let mut batch = SchemaBatch::new(); + + // Iterate through all jobs and delete them during rollback + let mut jobs_iter = self.ledger_db.iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + jobs_iter.seek_to_last(); + + for record in jobs_iter { + let record = record?; + let job_id = record.key; + let progress = record.value; + let status_u8 = progress.status.as_u8(); + + // Delete from all DA job tables + batch.delete::(&job_id)?; + increment_table_counter!("DaTxRequestByJobId", rollback_result); + + batch.delete::(&job_id)?; + increment_table_counter!("DaJobProgressById", rollback_result); + + batch.delete::(&(status_u8, 
job_id))?; + increment_table_counter!("DaJobStatusIndex", rollback_result); + } + + // Delete all entries from DaJobIdByProvingJobId (secondary index table) + let mut proving_job_iter = self + .ledger_db + .iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + proving_job_iter.seek_to_last(); + + for record in proving_job_iter { + let record = record?; + let proving_job_id = record.key; + + batch.delete::(&proving_job_id)?; + increment_table_counter!("DaJobIdByProvingJobId", rollback_result); + } + + self.ledger_db.write_schemas(batch)?; + Ok(rollback_result) + } } impl LedgerNodeRollback for BatchProverLedgerRollback { @@ -213,6 +261,9 @@ impl LedgerNodeRollback for BatchProverLedgerRollback { .put::(&(), &SlotNumber(l1_target)); } + // Rollback DA jobs + rollback_result = self.rollback_da_jobs(rollback_result)?; + let _ = self.ledger_db.flush(); Ok(rollback_result) } diff --git a/crates/storage-ops/src/rollback/node/sequencer.rs b/crates/storage-ops/src/rollback/node/sequencer.rs index 3b778c8e6b..8b6e570423 100644 --- a/crates/storage-ops/src/rollback/node/sequencer.rs +++ b/crates/storage-ops/src/rollback/node/sequencer.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use sov_db::schema::tables::{ - CommitmentsByNumber, L2BlockByHash, L2BlockByNumber, L2RangeByL1Height, - SequencerCommitmentByIndex, StateDiffByBlockNumber, + CommitmentsByNumber, DaJobProgressById, DaJobStatusIndex, DaTxRequestByJobId, L2BlockByHash, + L2BlockByNumber, L2RangeByL1Height, SequencerCommitmentByIndex, StateDiffByBlockNumber, }; use sov_db::schema::types::{L2BlockNumber, SlotNumber}; use sov_schema_db::{ScanDirection, SchemaBatch, DB}; @@ -112,6 +112,37 @@ impl SequencerLedgerRollback { self.ledger_db.write_schemas(batch)?; Ok(rollback_result) } + + fn rollback_da_jobs(&self, mut rollback_result: RollbackResult) -> Result { + let mut batch = SchemaBatch::new(); + + // Iterate through all jobs and delete them during rollback + let mut jobs_iter = 
self.ledger_db.iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + jobs_iter.seek_to_last(); + + for record in jobs_iter { + let record = record?; + let job_id = record.key; + let progress = record.value; + let status_u8 = progress.status.as_u8(); + + // Delete from all three tables + batch.delete::(&job_id)?; + increment_table_counter!("DaTxRequestByJobId", rollback_result); + + batch.delete::(&job_id)?; + increment_table_counter!("DaJobProgressById", rollback_result); + + batch.delete::(&(status_u8, job_id))?; + increment_table_counter!("DaJobStatusIndex", rollback_result); + } + + self.ledger_db.write_schemas(batch)?; + Ok(rollback_result) + } } impl LedgerNodeRollback for SequencerLedgerRollback { @@ -131,6 +162,9 @@ impl LedgerNodeRollback for SequencerLedgerRollback { rollback_result = self.rollback_slots(l1_target, rollback_result)?; } + // Rollback DA jobs + rollback_result = self.rollback_da_jobs(rollback_result)?; + let _ = self.ledger_db.flush(); Ok(rollback_result)