diff --git a/CHANGELOG.md b/CHANGELOG.md index 7025e83234..4ecfe2e416 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## [Unreleased] ### Added - feat: Support `eth_sendRawTransactionSync` on websocket disabled nodes. ([#3164](https://github.com/chainwayxyz/citrea/pull/3164)) +- feat: Stop at height full-node arguments + **New hidden node args:**\ + `--stop-at-l1-height`: Stop the full node after reaching this L1 height\ + `--stop-at-l2-height`: Stop the full node after reaching this L2 height ### Changed - refactor: Remove `dev_mode` field from local prover config to fix prover config confusion.(This change does not require no env var change because it was set with `RISC0_DEV_MODE` env var, right now dev mode is determined with `PROVING_MODE` in batch prover config) ([#3160](https://github.com/chainwayxyz/citrea/pull/3160)) diff --git a/bin/citrea/src/cli.rs b/bin/citrea/src/cli.rs index 83891da048..bb19611165 100644 --- a/bin/citrea/src/cli.rs +++ b/bin/citrea/src/cli.rs @@ -59,6 +59,24 @@ pub(crate) struct Args { /// Logging verbosity #[arg(long, short = 'q')] pub(crate) quiet: bool, + + /// Stop the full node after reaching this L1 height (debug/testing). + /// Only valid for full nodes. + #[arg( + long, + hide = true, + conflicts_with_all = ["sequencer", "batch_prover", "light_client_prover"] + )] + pub(crate) stop_at_l1_height: Option<u64>, + + /// Stop the full node after reaching this L2 height (debug/testing). + /// Only valid for full nodes. 
+ #[arg( + long, + hide = true, + conflicts_with_all = ["sequencer", "batch_prover", "light_client_prover"] + )] + pub(crate) stop_at_l2_height: Option, } pub(crate) enum NodeWithConfig { diff --git a/bin/citrea/src/main.rs b/bin/citrea/src/main.rs index cc9f2cdc79..649ede5a9f 100644 --- a/bin/citrea/src/main.rs +++ b/bin/citrea/src/main.rs @@ -6,7 +6,8 @@ use std::time::Duration; use anyhow::{anyhow, Context as _}; use bitcoin_da::service::BitcoinServiceConfig; use citrea::{ - initialize_logging, BitcoinRollup, CitreaRollupBlueprint, Dependencies, MockDemoRollup, Storage, + initialize_logging, BitcoinRollup, CitreaRollupBlueprint, Dependencies, MockDemoRollup, + StopConditions, Storage, }; use citrea_common::backup::BackupManager; use citrea_common::rpc::server::start_rpc_server; @@ -77,6 +78,11 @@ async fn main() -> anyhow::Result<()> { panic!("RISC0_DEV_MODE is enabled but network is set to Mainnet. Dev mode SHOULD NOT be used on mainnet."); } + let stop_conditions = StopConditions { + stop_at_l1_height: args.stop_at_l1_height, + stop_at_l2_height: args.stop_at_l2_height, + }; + match args.da_layer { SupportedDaLayer::Mock => { start_rollup::( @@ -84,6 +90,7 @@ async fn main() -> anyhow::Result<()> { &GenesisPaths::from_dir(&args.genesis_paths), args.rollup_config_path, node_type, + stop_conditions, ) .await?; } @@ -93,6 +100,7 @@ async fn main() -> anyhow::Result<()> { &GenesisPaths::from_dir(&args.genesis_paths), args.rollup_config_path, node_type, + stop_conditions, ) .await?; } @@ -107,6 +115,7 @@ async fn start_rollup( runtime_genesis_paths: &::DaSpec> as sov_modules_stf_blueprint::Runtime::DaSpec>>::GenesisPaths, rollup_config_path: Option, node_type: NodeWithConfig, + stop_conditions: StopConditions, ) -> Result<(), anyhow::Error> where DaC: serde::de::DeserializeOwned + DebugTrait + Clone + FromEnv + Send + Sync + 'static, @@ -365,6 +374,7 @@ where l2_block_tx, rpc_module, backup_manager, + stop_conditions, ) .await .expect("Could not start full-node"); 
diff --git a/bin/citrea/src/rollup/mod.rs b/bin/citrea/src/rollup/mod.rs index 74dd385600..d915f8517a 100644 --- a/bin/citrea/src/rollup/mod.rs +++ b/bin/citrea/src/rollup/mod.rs @@ -41,6 +41,7 @@ use tracing::{debug, info, instrument}; mod bitcoin; mod mock; pub use bitcoin::*; +pub use citrea_fullnode::StopConditions; pub use mock::*; type GenesisParams = StfGenesisParams< @@ -251,6 +252,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { l2_block_tx: broadcast::Sender, rpc_module: RpcModule<()>, backup_manager: Arc, + stop_conditions: StopConditions, ) -> Result<( FullNodeL2Syncer, FullNodeL1BlockHandler, @@ -290,6 +292,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { code_commitments, rpc_module, backup_manager, + stop_conditions, ) } diff --git a/bin/citrea/tests/common/helpers.rs b/bin/citrea/tests/common/helpers.rs index 5d00ec61a9..e3fd34eaed 100644 --- a/bin/citrea/tests/common/helpers.rs +++ b/bin/citrea/tests/common/helpers.rs @@ -5,7 +5,7 @@ use std::time::{Duration, Instant, SystemTime}; use anyhow::bail; use borsh::BorshDeserialize; -use citrea::{CitreaRollupBlueprint, Dependencies, MockDemoRollup, Storage}; +use citrea::{CitreaRollupBlueprint, Dependencies, MockDemoRollup, StopConditions, Storage}; use citrea_common::backup::BackupManager; use citrea_common::rpc::server::start_rpc_server; use citrea_common::rpc::{register_healthcheck_rpc, register_healthcheck_rpc_light_client_prover}; @@ -397,6 +397,7 @@ pub async fn start_rollup( l2_block_tx, rpc_module, backup_manager, + StopConditions::default(), ) .instrument(span.clone()) .await diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 6fe434380e..c815cb91a6 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -151,6 +151,11 @@ pub fn shutdown_requested(shutdown_signal: &GracefulShutdown) -> bool { .is_some() } +/// Returns the configured stop height if `next_height` exceeds it. 
+pub fn exceeded_stop_height(next_height: u64, stop_height: Option<u64>) -> Option<u64> { + stop_height.filter(|target| next_height > *target) +} + // If tangerine activation height is 0, return 1 // Because in tests when the first l2 block for the first sequencer commitment is needed // Tangerine activation height should be sent diff --git a/crates/ethereum-rpc/src/subscription.rs b/crates/ethereum-rpc/src/subscription.rs index b21ac3875f..b216f6be0f 100644 --- a/crates/ethereum-rpc/src/subscription.rs +++ b/crates/ethereum-rpc/src/subscription.rs @@ -11,7 +11,7 @@ use sov_modules_api::WorkingSet; use tokio::sync::broadcast; use tokio::sync::broadcast::error::RecvError; use tokio::task::JoinHandle; -use tracing::warn; +use tracing::{debug, warn}; const SUBSCRIPTION_TIMEOUT: Duration = Duration::from_secs(1); @@ -162,7 +162,7 @@ async fn l2_block_event_handler( continue; } Err(RecvError::Closed) => { - warn!(target: "subscriptions", "l2_block_rx is closed"); + debug!(target: "subscriptions", "l2_block_rx is closed"); break; } Ok(height) => height, diff --git a/crates/fullnode/src/da_block_handler.rs b/crates/fullnode/src/da_block_handler.rs index b4c85700d1..6ffb66a41e 100644 --- a/crates/fullnode/src/da_block_handler.rs +++ b/crates/fullnode/src/da_block_handler.rs @@ -8,11 +8,13 @@ use std::collections::{HashMap, VecDeque}; use std::sync::Arc; use std::time::{Duration, Instant}; -use anyhow::anyhow; +use anyhow::{anyhow, bail}; use citrea_common::backup::BackupManager; use citrea_common::cache::L1BlockCache; use citrea_common::da::{extract_zk_proofs_and_sequencer_commitments, sync_l1, ProofOrCommitment}; -use citrea_common::utils::{get_tangerine_activation_height_non_zero, shutdown_requested}; +use citrea_common::utils::{ + exceeded_stop_height, get_tangerine_activation_height_non_zero, shutdown_requested, +}; use citrea_primitives::forks::fork_from_block_number; use citrea_primitives::network_to_dev_mode; use reth_tasks::shutdown::GracefulShutdown; @@ -77,6 +79,8 @@ where
backup_manager: Arc, /// Citrea network the node is operating on network: Network, + /// Optional L1 height at which to stop syncing (debug/testing) + stop_at_l1_height: Option, } impl L1BlockHandler @@ -95,6 +99,7 @@ where /// * `code_commitments_by_spec` - Map of ZKVM code commitments /// * `l1_block_cache` - Cache for L1 block data /// * `backup_manager` - Manager for backup operations + /// * `stop_at_l1_height` - Optional L1 height at which to stop syncing (debug/testing) #[allow(clippy::too_many_arguments)] pub fn new( network: Network, @@ -105,6 +110,7 @@ where code_commitments_by_spec: HashMap, l1_block_cache: Arc>>, backup_manager: Arc, + stop_at_l1_height: Option, ) -> Self { Self { ledger_db, @@ -116,6 +122,7 @@ where queued_l1_blocks: Arc::new(Mutex::new(VecDeque::new())), backup_manager, network, + stop_at_l1_height, } } @@ -176,6 +183,13 @@ where let Some(l1_block) = self.queued_l1_blocks.lock().await.front().cloned() else { break; }; + + if let Some(stop_height) = + exceeded_stop_height(l1_block.header().height(), self.stop_at_l1_height) + { + bail!("Reached target L1 height {stop_height}"); + } + self.process_l1_block(l1_block).await?; self.queued_l1_blocks.lock().await.pop_front(); } diff --git a/crates/fullnode/src/l2_syncer.rs b/crates/fullnode/src/l2_syncer.rs index 6f3d51785e..18190898a5 100644 --- a/crates/fullnode/src/l2_syncer.rs +++ b/crates/fullnode/src/l2_syncer.rs @@ -11,7 +11,7 @@ use borsh::BorshDeserialize; use citrea_common::backup::BackupManager; use citrea_common::cache::L1BlockCache; use citrea_common::l2::{apply_l2_block, commit_l2_block, sync_l2}; -use citrea_common::utils::shutdown_requested; +use citrea_common::utils::{exceeded_stop_height, shutdown_requested}; use citrea_primitives::types::L2BlockHash; use citrea_stf::runtime::CitreaRuntime; use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; @@ -74,6 +74,8 @@ where l2_block_tx: broadcast::Sender, /// Manager for backup operations backup_manager: Arc, + /// Optional 
L2 height at which to stop syncing (debug/testing) + stop_at_l2_height: Option, } impl L2Syncer @@ -95,6 +97,7 @@ where /// * `l2_block_tx` - Channel for L2 block notifications /// * `backup_manager` - Manager for backup operations /// * `include_tx_body` - Whether to include transaction bodies in block processing + /// * `stop_at_l2_height` - Optional L2 height at which to stop syncing (debug/testing) #[allow(clippy::too_many_arguments)] pub fn new( runner_config: RunnerConfig, @@ -108,6 +111,7 @@ where l2_block_tx: broadcast::Sender, backup_manager: Arc, include_tx_body: bool, + stop_at_l2_height: Option, ) -> Result { let start_l2_height = ledger_db.get_head_l2_block_height()?.unwrap_or(0) + 1; @@ -130,6 +134,7 @@ where fork_manager, l2_block_tx, backup_manager, + stop_at_l2_height, }) } @@ -156,7 +161,6 @@ where select! { _ = &mut shutdown_signal => { info!("Shutting down L2Syncer"); - l2_rx.close(); return; }, _ = &mut l2_sync_worker => { @@ -166,11 +170,16 @@ where Some(l2_blocks) = l2_rx.recv() => { // While syncing, we'd like to process L2 blocks as they come without any delays. 
for l2_block in l2_blocks { + if let Some(stop_height) = + exceeded_stop_height(l2_block.header.height.to(), self.stop_at_l2_height) + { + return info!("Reached target L2 height {stop_height}"); + } + let mut backoff = ExponentialBackoff::default(); loop { if shutdown_requested(&shutdown_signal) { info!("Shutting down L2Syncer"); - l2_rx.close(); return; } @@ -184,6 +193,7 @@ where } } } + } }, } diff --git a/crates/fullnode/src/lib.rs b/crates/fullnode/src/lib.rs index c9060c4635..945f6a90bf 100644 --- a/crates/fullnode/src/lib.rs +++ b/crates/fullnode/src/lib.rs @@ -140,6 +140,19 @@ use sov_rollup_interface::zk::ZkvmHost; use sov_rollup_interface::Network; use tokio::sync::{broadcast, Mutex}; +/// Configuration for optional height-based node stopping (debug/testing) +/// +/// When either stop condition is set, the fullnode will gracefully shut down +/// after processing blocks up to and including the specified height. +/// Similar to Bitcoin's `--stopatheight` option. +#[derive(Debug, Clone, Default)] +pub struct StopConditions { + /// Stop the fullnode after processing this L1 block height + pub stop_at_l1_height: Option, + /// Stop the fullnode after processing this L2 block height + pub stop_at_l2_height: Option, +} + /// Module for handling L1 data availability blocks pub mod da_block_handler; /// Module containing database migration definitions @@ -169,6 +182,7 @@ pub mod rpc; /// * `code_commitments` - Map of ZKVM code commitments by spec ID /// * `rpc_module` - RPC module for external communication /// * `backup_manager` - Manager for backup operations +/// * `stop_conditions` - Optional stop conditions for height-based shutdown (debug/testing) /// /// # Type Parameters /// * `DA` - Data availability service type @@ -200,6 +214,7 @@ pub fn build_services( code_commitments: HashMap::CodeCommitment>, rpc_module: RpcModule<()>, backup_manager: Arc, + stop_conditions: StopConditions, ) -> Result<( L2Syncer, L1BlockHandler, @@ -244,6 +259,7 @@ where 
l2_block_tx, backup_manager.clone(), include_tx_bodies, + stop_conditions.stop_at_l2_height, )?; let l1_block_handler = L1BlockHandler::new( @@ -255,6 +271,7 @@ where code_commitments, Arc::new(Mutex::new(L1BlockCache::new())), backup_manager, + stop_conditions.stop_at_l1_height, ); Ok((l2_syncer, l1_block_handler, pruner, rpc_module))