Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,10 @@
## [Unreleased]
### Added
- feat: Support `eth_sendRawTransactionSync` on websocket disabled nodes. ([#3164](https://github.com/chainwayxyz/citrea/pull/3164))
- feat: Stop at height full-node arguments
**New hidden node args:**\
`--stop-at-l1-height`: Stop the full node after reaching this L1 height
`--stop-at-l2-height`: Stop the full node after reaching this L2 height

### Changed
- refactor: Remove `dev_mode` field from local prover config to fix prover config confusion. (This change does not require any env var change because it was set with the `RISC0_DEV_MODE` env var; dev mode is now determined by `PROVING_MODE` in the batch prover config.) ([#3160](https://github.com/chainwayxyz/citrea/pull/3160))
Expand Down
18 changes: 18 additions & 0 deletions bin/citrea/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,24 @@ pub(crate) struct Args {
/// Logging verbosity
#[arg(long, short = 'q')]
pub(crate) quiet: bool,

/// Stop the full node after reaching this L1 height (debug/testing).
/// Only valid for full nodes.
#[arg(
long,
hide = true,
conflicts_with_all = ["sequencer", "batch_prover", "light_client_prover"]
)]
pub(crate) stop_at_l1_height: Option<u64>,

/// Stop the full node after reaching this L2 height (debug/testing).
/// Only valid for full nodes.
#[arg(
long,
hide = true,
conflicts_with_all = ["sequencer", "batch_prover", "light_client_prover"]
)]
pub(crate) stop_at_l2_height: Option<u64>,
}

pub(crate) enum NodeWithConfig {
Expand Down
12 changes: 11 additions & 1 deletion bin/citrea/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@ use std::time::Duration;
use anyhow::{anyhow, Context as _};
use bitcoin_da::service::BitcoinServiceConfig;
use citrea::{
initialize_logging, BitcoinRollup, CitreaRollupBlueprint, Dependencies, MockDemoRollup, Storage,
initialize_logging, BitcoinRollup, CitreaRollupBlueprint, Dependencies, MockDemoRollup,
StopConditions, Storage,
};
use citrea_common::backup::BackupManager;
use citrea_common::rpc::server::start_rpc_server;
Expand Down Expand Up @@ -77,13 +78,19 @@ async fn main() -> anyhow::Result<()> {
panic!("RISC0_DEV_MODE is enabled but network is set to Mainnet. Dev mode SHOULD NOT be used on mainnet.");
}

let stop_conditions = StopConditions {
stop_at_l1_height: args.stop_at_l1_height,
stop_at_l2_height: args.stop_at_l2_height,
};

match args.da_layer {
SupportedDaLayer::Mock => {
start_rollup::<MockDemoRollup, MockDaConfig>(
network,
&GenesisPaths::from_dir(&args.genesis_paths),
args.rollup_config_path,
node_type,
stop_conditions,
)
.await?;
}
Expand All @@ -93,6 +100,7 @@ async fn main() -> anyhow::Result<()> {
&GenesisPaths::from_dir(&args.genesis_paths),
args.rollup_config_path,
node_type,
stop_conditions,
)
.await?;
}
Expand All @@ -107,6 +115,7 @@ async fn start_rollup<S, DaC>(
runtime_genesis_paths: &<CitreaRuntime<DefaultContext, <S as RollupBlueprint>::DaSpec> as sov_modules_stf_blueprint::Runtime<DefaultContext, <S as RollupBlueprint>::DaSpec>>::GenesisPaths,
rollup_config_path: Option<String>,
node_type: NodeWithConfig,
stop_conditions: StopConditions,
) -> Result<(), anyhow::Error>
where
DaC: serde::de::DeserializeOwned + DebugTrait + Clone + FromEnv + Send + Sync + 'static,
Expand Down Expand Up @@ -365,6 +374,7 @@ where
l2_block_tx,
rpc_module,
backup_manager,
stop_conditions,
)
.await
.expect("Could not start full-node");
Expand Down
3 changes: 3 additions & 0 deletions bin/citrea/src/rollup/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ use tracing::{debug, info, instrument};
mod bitcoin;
mod mock;
pub use bitcoin::*;
pub use citrea_fullnode::StopConditions;
pub use mock::*;

type GenesisParams<T> = StfGenesisParams<
Expand Down Expand Up @@ -251,6 +252,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint {
l2_block_tx: broadcast::Sender<u64>,
rpc_module: RpcModule<()>,
backup_manager: Arc<BackupManager>,
stop_conditions: StopConditions,
) -> Result<(
FullNodeL2Syncer<Self::DaService, LedgerDB>,
FullNodeL1BlockHandler<Self::Vm, Self::DaService, LedgerDB>,
Expand Down Expand Up @@ -290,6 +292,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint {
code_commitments,
rpc_module,
backup_manager,
stop_conditions,
)
}

Expand Down
3 changes: 2 additions & 1 deletion bin/citrea/tests/common/helpers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use std::time::{Duration, Instant, SystemTime};

use anyhow::bail;
use borsh::BorshDeserialize;
use citrea::{CitreaRollupBlueprint, Dependencies, MockDemoRollup, Storage};
use citrea::{CitreaRollupBlueprint, Dependencies, MockDemoRollup, StopConditions, Storage};
use citrea_common::backup::BackupManager;
use citrea_common::rpc::server::start_rpc_server;
use citrea_common::rpc::{register_healthcheck_rpc, register_healthcheck_rpc_light_client_prover};
Expand Down Expand Up @@ -397,6 +397,7 @@ pub async fn start_rollup(
l2_block_tx,
rpc_module,
backup_manager,
StopConditions::default(),
)
.instrument(span.clone())
.await
Expand Down
5 changes: 5 additions & 0 deletions crates/common/src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,11 @@ pub fn shutdown_requested(shutdown_signal: &GracefulShutdown) -> bool {
.is_some()
}

/// Returns the configured stop height when `next_height` has gone past it.
///
/// Yields `Some(stop_height)` only if a stop height is configured and
/// `next_height` is strictly greater than it; otherwise `None`.
pub fn exceeded_stop_height(next_height: u64, stop_height: Option<u64>) -> Option<u64> {
    match stop_height {
        Some(target) if next_height > target => Some(target),
        _ => None,
    }
}

// If tangerine activation height is 0, return 1
// Because in tests when the first l2 block for the first sequencer commitment is needed
// Tangerine activation height should be sent
Expand Down
4 changes: 2 additions & 2 deletions crates/ethereum-rpc/src/subscription.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ use sov_modules_api::WorkingSet;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::task::JoinHandle;
use tracing::warn;
use tracing::{debug, warn};

const SUBSCRIPTION_TIMEOUT: Duration = Duration::from_secs(1);

Expand Down Expand Up @@ -162,7 +162,7 @@ async fn l2_block_event_handler<C: sov_modules_api::Context>(
continue;
}
Err(RecvError::Closed) => {
warn!(target: "subscriptions", "l2_block_rx is closed");
debug!(target: "subscriptions", "l2_block_rx is closed");
break;
}
Ok(height) => height,
Expand Down
18 changes: 16 additions & 2 deletions crates/fullnode/src/da_block_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,13 @@ use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, Instant};

use anyhow::anyhow;
use anyhow::{anyhow, bail};
use citrea_common::backup::BackupManager;
use citrea_common::cache::L1BlockCache;
use citrea_common::da::{extract_zk_proofs_and_sequencer_commitments, sync_l1, ProofOrCommitment};
use citrea_common::utils::{get_tangerine_activation_height_non_zero, shutdown_requested};
use citrea_common::utils::{
exceeded_stop_height, get_tangerine_activation_height_non_zero, shutdown_requested,
};
use citrea_primitives::forks::fork_from_block_number;
use citrea_primitives::network_to_dev_mode;
use reth_tasks::shutdown::GracefulShutdown;
Expand Down Expand Up @@ -77,6 +79,8 @@ where
backup_manager: Arc<BackupManager>,
/// Citrea network the node is operating on
network: Network,
/// Optional L1 height at which to stop syncing (debug/testing)
stop_at_l1_height: Option<u64>,
}

impl<Vm, Da, DB> L1BlockHandler<Vm, Da, DB>
Expand All @@ -95,6 +99,7 @@ where
/// * `code_commitments_by_spec` - Map of ZKVM code commitments
/// * `l1_block_cache` - Cache for L1 block data
/// * `backup_manager` - Manager for backup operations
/// * `stop_at_l1_height` - Optional L1 height at which to stop syncing (debug/testing)
#[allow(clippy::too_many_arguments)]
pub fn new(
network: Network,
Expand All @@ -105,6 +110,7 @@ where
code_commitments_by_spec: HashMap<SpecId, Vm::CodeCommitment>,
l1_block_cache: Arc<Mutex<L1BlockCache<Da>>>,
backup_manager: Arc<BackupManager>,
stop_at_l1_height: Option<u64>,
) -> Self {
Self {
ledger_db,
Expand All @@ -116,6 +122,7 @@ where
queued_l1_blocks: Arc::new(Mutex::new(VecDeque::new())),
backup_manager,
network,
stop_at_l1_height,
}
}

Expand Down Expand Up @@ -176,6 +183,13 @@ where
let Some(l1_block) = self.queued_l1_blocks.lock().await.front().cloned() else {
break;
};

if let Some(stop_height) =
exceeded_stop_height(l1_block.header().height(), self.stop_at_l1_height)
{
bail!("Reached target L1 height {stop_height}");
}

self.process_l1_block(l1_block).await?;
self.queued_l1_blocks.lock().await.pop_front();
}
Expand Down
16 changes: 13 additions & 3 deletions crates/fullnode/src/l2_syncer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ use borsh::BorshDeserialize;
use citrea_common::backup::BackupManager;
use citrea_common::cache::L1BlockCache;
use citrea_common::l2::{apply_l2_block, commit_l2_block, sync_l2};
use citrea_common::utils::shutdown_requested;
use citrea_common::utils::{exceeded_stop_height, shutdown_requested};
use citrea_primitives::types::L2BlockHash;
use citrea_stf::runtime::CitreaRuntime;
use jsonrpsee::http_client::{HttpClient, HttpClientBuilder};
Expand Down Expand Up @@ -74,6 +74,8 @@ where
l2_block_tx: broadcast::Sender<u64>,
/// Manager for backup operations
backup_manager: Arc<BackupManager>,
/// Optional L2 height at which to stop syncing (debug/testing)
stop_at_l2_height: Option<u64>,
}

impl<DA, DB> L2Syncer<DA, DB>
Expand All @@ -95,6 +97,7 @@ where
/// * `l2_block_tx` - Channel for L2 block notifications
/// * `backup_manager` - Manager for backup operations
/// * `include_tx_body` - Whether to include transaction bodies in block processing
/// * `stop_at_l2_height` - Optional L2 height at which to stop syncing (debug/testing)
#[allow(clippy::too_many_arguments)]
pub fn new(
runner_config: RunnerConfig,
Expand All @@ -108,6 +111,7 @@ where
l2_block_tx: broadcast::Sender<u64>,
backup_manager: Arc<BackupManager>,
include_tx_body: bool,
stop_at_l2_height: Option<u64>,
) -> Result<Self, anyhow::Error> {
let start_l2_height = ledger_db.get_head_l2_block_height()?.unwrap_or(0) + 1;

Expand All @@ -130,6 +134,7 @@ where
fork_manager,
l2_block_tx,
backup_manager,
stop_at_l2_height,
})
}

Expand All @@ -156,7 +161,6 @@ where
select! {
_ = &mut shutdown_signal => {
info!("Shutting down L2Syncer");
l2_rx.close();
return;
},
_ = &mut l2_sync_worker => {
Expand All @@ -166,11 +170,16 @@ where
Some(l2_blocks) = l2_rx.recv() => {
// While syncing, we'd like to process L2 blocks as they come without any delays.
for l2_block in l2_blocks {
if let Some(stop_height) =
exceeded_stop_height(l2_block.header.height.to(), self.stop_at_l2_height)
{
return info!("Reached target L2 height {stop_height}");
}

let mut backoff = ExponentialBackoff::default();
loop {
if shutdown_requested(&shutdown_signal) {
info!("Shutting down L2Syncer");
l2_rx.close();
return;
}

Expand All @@ -184,6 +193,7 @@ where
}
}
}

}
},
}
Expand Down
17 changes: 17 additions & 0 deletions crates/fullnode/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,19 @@ use sov_rollup_interface::zk::ZkvmHost;
use sov_rollup_interface::Network;
use tokio::sync::{broadcast, Mutex};

/// Configuration for optional height-based node stopping (debug/testing)
///
/// When either stop condition is set, the fullnode will gracefully shut down
/// after processing blocks up to and including the specified height.
/// Similar to Bitcoin's `--stopatheight` option.
///
/// `Default` leaves both fields `None`, i.e. no stop condition — the node
/// runs indefinitely.
#[derive(Debug, Clone, Default)]
pub struct StopConditions {
    /// Stop the fullnode after processing this L1 block height
    pub stop_at_l1_height: Option<u64>,
    /// Stop the fullnode after processing this L2 block height
    pub stop_at_l2_height: Option<u64>,
}

/// Module for handling L1 data availability blocks
pub mod da_block_handler;
/// Module containing database migration definitions
Expand Down Expand Up @@ -169,6 +182,7 @@ pub mod rpc;
/// * `code_commitments` - Map of ZKVM code commitments by spec ID
/// * `rpc_module` - RPC module for external communication
/// * `backup_manager` - Manager for backup operations
/// * `stop_conditions` - Optional stop conditions for height-based shutdown (debug/testing)
///
/// # Type Parameters
/// * `DA` - Data availability service type
Expand Down Expand Up @@ -200,6 +214,7 @@ pub fn build_services<DA, DB, Vm>(
code_commitments: HashMap<SpecId, <Vm as Zkvm>::CodeCommitment>,
rpc_module: RpcModule<()>,
backup_manager: Arc<BackupManager>,
stop_conditions: StopConditions,
) -> Result<(
L2Syncer<DA, DB>,
L1BlockHandler<Vm, DA, DB>,
Expand Down Expand Up @@ -244,6 +259,7 @@ where
l2_block_tx,
backup_manager.clone(),
include_tx_bodies,
stop_conditions.stop_at_l2_height,
)?;

let l1_block_handler = L1BlockHandler::new(
Expand All @@ -255,6 +271,7 @@ where
code_commitments,
Arc::new(Mutex::new(L1BlockCache::new())),
backup_manager,
stop_conditions.stop_at_l1_height,
);

Ok((l2_syncer, l1_block_handler, pruner, rpc_module))
Expand Down
Loading