diff --git a/finalizer/src/actor.rs b/finalizer/src/actor.rs
index cd23a7d5..45d79946 100644
--- a/finalizer/src/actor.rs
+++ b/finalizer/src/actor.rs
@@ -378,13 +378,13 @@ impl<
             // out-of-bounds array access.
             view + REGISTRY_CHANGE_VIEW_DELTA,
             //view,
-            std::mem::take(&mut self.state.added_validators),
-            std::mem::take(&mut self.state.removed_validators),
+            &self.state.added_validators,
+            &self.state.removed_validators,
         );
         let participants = self.registry.peers().clone();
         // TODO(matthias): should we wait until view `view + REGISTRY_CHANGE_VIEW_DELTA`
         // to update the oracle?
-        self.oracle.register(view, participants).await;
+        self.oracle.register(new_height, participants).await;
     }
 
     #[cfg(feature = "prom")]
@@ -406,6 +406,14 @@ impl<
             histogram!("database_operations_duration_millis").record(db_operations_duration);
         }
 
+        // Only clear the added and removed validators after saving the state to disk
+        if !self.state.added_validators.is_empty() {
+            self.state.added_validators.clear();
+        }
+        if !self.state.removed_validators.is_empty() {
+            self.state.removed_validators.clear();
+        }
+
         #[cfg(debug_assertions)]
         {
             let gauge: Gauge = Gauge::default();
diff --git a/node/src/args.rs b/node/src/args.rs
index 09acaf7b..8672d005 100644
--- a/node/src/args.rs
+++ b/node/src/args.rs
@@ -19,6 +19,7 @@ use commonware_codec::ReadExt;
 use commonware_utils::from_hex_formatted;
 use futures::{channel::oneshot, future::try_join_all};
 use governor::Quota;
+use ssz::Decode;
 use std::{
     net::{IpAddr, Ipv4Addr, SocketAddr},
     num::NonZeroU32,
@@ -35,6 +36,7 @@ use crate::engine::VALIDATOR_MINIMUM_STAKE;
 #[cfg(not(any(feature = "bench", feature = "base-bench")))]
 use summit_types::RethEngineClient;
 use summit_types::account::{ValidatorAccount, ValidatorStatus};
+use summit_types::checkpoint::Checkpoint;
 use summit_types::consensus_state::ConsensusState;
 use summit_types::network_oracle::DiscoveryOracle;
 use summit_types::{Genesis, PublicKey, utils::get_expanded_path};
@@ -116,6 +118,9 @@ pub struct RunFlags {
         default_value_t = String::from("./example_genesis.toml")
     )]
     pub genesis_path: String,
+    /// Path to a checkpoint file
+    #[arg(long)]
+    pub checkpoint_path: Option<String>,
     /// IP address for this node (optional, will use genesis if not provided)
     #[arg(long)]
     pub ip: Option<String>,
@@ -157,6 +162,16 @@ impl Command {
             console_subscriber::init();
         }
 
+        let maybe_checkpoint = flags.checkpoint_path.as_ref().map(|path| {
+            // TODO(matthias): verify the checkpoint
+            let checkpoint_bytes: Vec<u8> =
+                std::fs::read(path).expect("failed to read checkpoint from disk");
+            let checkpoint =
+                Checkpoint::from_ssz_bytes(&checkpoint_bytes).expect("failed to parse checkpoint");
+            ConsensusState::try_from(checkpoint)
+                .expect("failed to create consensus state from checkpoint")
+        });
+
         let store_path = get_expanded_path(&flags.store_path).expect("Invalid store path");
 
         let signer = expect_signer(&flags.key_path);
@@ -203,7 +218,11 @@ impl Command {
             .collect();
         committee.sort();
 
-        let initial_state = get_initial_state(&genesis, &committee, None);
+        let genesis_hash: [u8; 32] = from_hex_formatted(&genesis.eth_genesis_hash)
+            .map(|hash_bytes| hash_bytes.try_into())
+            .expect("bad eth_genesis_hash")
+            .expect("bad eth_genesis_hash");
+        let initial_state = get_initial_state(genesis_hash, &committee, maybe_checkpoint);
         let mut peers: Vec<PublicKey> = initial_state
             .validator_accounts
             .iter()
@@ -316,7 +335,7 @@ impl Command {
         }
 
         // configure network
-        let mut p2p_cfg = authenticated::discovery::Config::aggressive(
+        let mut p2p_cfg = authenticated::discovery::Config::recommended(
             signer.clone(),
             genesis.namespace.as_bytes(),
             SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), flags.port),
@@ -331,7 +350,9 @@ impl Command {
             authenticated::discovery::Network::new(context.with_label("network"), p2p_cfg);
 
         // Provide authorized peers
-        oracle.register(0, peers.clone()).await;
+        oracle
+            .register(initial_state.latest_height, peers.clone())
+            .await;
         let oracle = DiscoveryOracle::new(oracle);
 
         let config = EngineConfig::get_engine_config(
@@ -432,7 +453,11 @@ pub fn run_node_with_runtime(
         .collect();
     committee.sort();
 
-    let initial_state = get_initial_state(&genesis, &committee, checkpoint);
+    let genesis_hash: [u8; 32] = from_hex_formatted(&genesis.eth_genesis_hash)
+        .map(|hash_bytes| hash_bytes.try_into())
+        .expect("bad eth_genesis_hash")
+        .expect("bad eth_genesis_hash");
+    let initial_state = get_initial_state(genesis_hash, &committee, checkpoint);
     let mut peers: Vec<PublicKey> = initial_state
         .validator_accounts
         .iter()
@@ -509,6 +534,7 @@ pub fn run_node_with_runtime(
     }
 
     // configure network
+    #[cfg(feature = "e2e")]
    let mut p2p_cfg = authenticated::discovery::Config::aggressive(
         signer.clone(),
         genesis.namespace.as_bytes(),
@@ -517,6 +543,15 @@ pub fn run_node_with_runtime(
         network_committee,
         genesis.max_message_size_bytes as usize,
     );
+    #[cfg(not(feature = "e2e"))]
+    let mut p2p_cfg = authenticated::discovery::Config::recommended(
+        signer.clone(),
+        genesis.namespace.as_bytes(),
+        SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), flags.port),
+        our_ip,
+        network_committee,
+        genesis.max_message_size_bytes as usize,
+    );
     p2p_cfg.mailbox_size = MAILBOX_SIZE;
 
     // Start p2p
@@ -524,7 +559,9 @@ pub fn run_node_with_runtime(
         authenticated::discovery::Network::new(context.with_label("network"), p2p_cfg);
 
     // Provide authorized peers
-    oracle.register(0, peers.clone()).await;
+    oracle
+        .register(initial_state.latest_height, peers.clone())
+        .await;
 
     let oracle = DiscoveryOracle::new(oracle);
 
@@ -602,14 +639,10 @@ pub fn run_node_with_runtime(
 }
 
 fn get_initial_state(
-    genesis: &Genesis,
-    committee: &Vec<(PublicKey, SocketAddr, Address)>,
+    genesis_hash: [u8; 32],
+    genesis_committee: &Vec<(PublicKey, SocketAddr, Address)>,
     checkpoint: Option<ConsensusState>,
 ) -> ConsensusState {
-    let genesis_hash: [u8; 32] = from_hex_formatted(&genesis.eth_genesis_hash)
-        .map(|hash_bytes| hash_bytes.try_into())
-        .expect("bad eth_genesis_hash")
-        .expect("bad eth_genesis_hash");
     let genesis_hash: B256 = genesis_hash.into();
     checkpoint.unwrap_or_else(|| {
         let forkchoice = ForkchoiceState {
@@ -619,7 +652,7 @@ fn get_initial_state(
         };
         let mut state = ConsensusState::new(forkchoice);
         // Add the genesis nodes to the consensus state with the minimum stake balance.
-        for (pubkey, _, address) in committee {
+        for (pubkey, _, address) in genesis_committee {
             let pubkey_bytes: [u8; 32] = pubkey
                 .as_ref()
                 .try_into()
diff --git a/node/src/bin/stake_and_checkpoint.rs b/node/src/bin/stake_and_checkpoint.rs
index 266cfe06..3e951e49 100644
--- a/node/src/bin/stake_and_checkpoint.rs
+++ b/node/src/bin/stake_and_checkpoint.rs
@@ -810,6 +810,7 @@ fn get_node_flags(node: usize) -> RunFlags {
         engine_ipc_path: format!("/tmp/reth_engine_api{node}.ipc"),
         #[cfg(any(feature = "base-bench", feature = "bench"))]
         bench_block_dir: None,
+        checkpoint_path: None,
         ip: None,
     }
 }
diff --git a/node/src/bin/testnet.rs b/node/src/bin/testnet.rs
index 9c6501d4..8351f6b7 100644
--- a/node/src/bin/testnet.rs
+++ b/node/src/bin/testnet.rs
@@ -205,6 +205,7 @@ fn get_node_flags(node: usize) -> RunFlags {
         engine_ipc_path: format!("/tmp/reth_engine_api{node}.ipc"),
         #[cfg(any(feature = "base-bench", feature = "bench"))]
         bench_block_dir: None,
+        checkpoint_path: None,
         ip: None,
     }
 }
diff --git a/node/src/bin/withdraw_and_exit.rs b/node/src/bin/withdraw_and_exit.rs
index c1bd21d5..e64e1896 100644
--- a/node/src/bin/withdraw_and_exit.rs
+++ b/node/src/bin/withdraw_and_exit.rs
@@ -394,6 +394,7 @@ fn get_node_flags(node: usize) -> RunFlags {
         engine_ipc_path: format!("/tmp/reth_engine_api{node}.ipc"),
         #[cfg(any(feature = "base-bench", feature = "bench"))]
         bench_block_dir: None,
+        checkpoint_path: None,
         ip: None,
     }
 }
diff --git a/types/src/consensus_state_query.rs b/types/src/consensus_state_query.rs
index fc5a5edf..669b0d94 100644
--- a/types/src/consensus_state_query.rs
+++ b/types/src/consensus_state_query.rs
@@ -3,6 +3,7 @@ use crate::checkpoint::Checkpoint;
 use futures::SinkExt;
 use futures::channel::{mpsc, oneshot};
 
+#[allow(clippy::large_enum_variant)]
 pub enum ConsensusStateRequest {
     GetCheckpoint,
     GetLatestHeight,
diff --git a/types/src/registry.rs b/types/src/registry.rs
index 093eeae8..22d1c9d4 100644
--- a/types/src/registry.rs
+++ b/types/src/registry.rs
@@ -38,7 +38,7 @@ impl Registry {
         registry
     }
 
-    pub fn update_registry(&self, index: View, add: Vec<PublicKey>, remove: Vec<PublicKey>) {
+    pub fn update_registry(&self, index: View, add: &[PublicKey], remove: &[PublicKey]) {
         let mut views = self.views.write().unwrap();
 
         let mut participants = if let Some((latest_view, view_data)) = views.last_key_value() {
@@ -55,9 +55,10 @@ impl Registry {
                 continue;
             }
             participants.participants.push(participant.clone());
-            participants
-                .participants_map
-                .insert(participant, (participants.participants.len() as u32) - 1);
+            participants.participants_map.insert(
+                participant.clone(),
+                (participants.participants.len() as u32) - 1,
+            );
         }
 
         for participant in remove {
@@ -211,7 +212,9 @@ mod tests {
         let new_participant = crate::PrivateKey::from_seed(99).public_key();
 
         // Add participant to view 1
-        registry.update_registry(1, vec![new_participant.clone()], vec![]);
+        let add = vec![new_participant.clone()];
+        let remove = vec![];
+        registry.update_registry(1, &add, &remove);
 
         // Verify participant was added
         let view_1_participants = registry.participants(1);
@@ -230,7 +233,9 @@ mod tests {
         let existing_participant = registry.participants(0).unwrap()[0].clone();
 
         // Try to add existing participant - should log warning but not fail
-        registry.update_registry(1, vec![existing_participant.clone()], vec![]);
+        let add = vec![existing_participant.clone()];
+        let remove = vec![];
+        registry.update_registry(1, &add, &remove);
 
         // Verify participant count didn't increase (duplicate was ignored)
         let view_1_participants = registry.participants(1);
@@ -244,7 +249,9 @@ mod tests {
         let participant_to_remove = registry.participants(0).unwrap()[1].clone();
 
         // Remove participant from view 1
-        registry.update_registry(1, vec![], vec![participant_to_remove.clone()]);
+        let add = vec![];
+        let remove = vec![participant_to_remove.clone()];
+        registry.update_registry(1, &add, &remove);
 
         // Verify participant was removed
         let view_1_participants = registry.participants(1);
@@ -266,7 +273,9 @@ mod tests {
         let nonexistent_participant = crate::PrivateKey::from_seed(999).public_key();
 
         // Try to remove non-existent participant - should log warning but not fail
-        registry.update_registry(1, vec![], vec![nonexistent_participant]);
+        let add = vec![];
+        let remove = vec![nonexistent_participant];
+        registry.update_registry(1, &add, &remove);
 
         // Verify participant count didn't change (remove was ignored)
         let view_1_participants = registry.participants(1);
@@ -308,7 +317,9 @@ mod tests {
 
         // Add participant to create view 3
         let new_participant = crate::PrivateKey::from_seed(100).public_key();
-        registry.update_registry(3, vec![new_participant.clone()], vec![]);
+        let add = vec![new_participant.clone()];
+        let remove = vec![];
+        registry.update_registry(3, &add, &remove);
 
         // Views 0, 1, 2 should still use original participants (largest view <= requested)
         assert_eq!(registry.participants(0).unwrap(), original_participants);
@@ -374,7 +385,9 @@ mod tests {
 
         // Add participant to create view 1
         let new_participant = crate::PrivateKey::from_seed(100).public_key();
-        registry.update_registry(1, vec![new_participant], vec![]);
+        let add = vec![new_participant];
+        let remove = vec![];
+        registry.update_registry(1, &add, &remove);
 
         // Peer set ID should now be 1
         assert_eq!(registry.peer_set_id(), 1);
@@ -390,7 +403,9 @@ mod tests {
 
         // Add participant
         let new_participant = crate::PrivateKey::from_seed(100).public_key();
-        registry.update_registry(1, vec![new_participant.clone()], vec![]);
+        let add = vec![new_participant.clone()];
+        let remove = vec![];
+        registry.update_registry(1, &add, &remove);
 
         // Peers should now reflect the latest view
         let updated_peers = registry.peers();
@@ -407,8 +422,8 @@ mod tests {
         let participant_a = crate::PrivateKey::from_seed(100).public_key();
         let participant_b = crate::PrivateKey::from_seed(101).public_key();
 
-        registry.update_registry(3, vec![participant_a.clone()], vec![]);
-        registry.update_registry(7, vec![participant_b.clone()], vec![]);
+        registry.update_registry(3, &[participant_a.clone()], &[]);
+        registry.update_registry(7, &[participant_b.clone()], &[]);
 
         // Test participants for each view (largest view <= requested)
         assert_eq!(registry.participants(0).unwrap().len(), 2); // view 0
@@ -432,7 +447,7 @@ mod tests {
 
         // Add participant to view 1
         let new_participant = crate::PrivateKey::from_seed(100).public_key();
-        registry.update_registry(1, vec![new_participant.clone()], vec![]);
+        registry.update_registry(1, &[new_participant.clone()], &[]);
 
         // Original view should remain unchanged
         assert_eq!(registry.participants(0).unwrap(), &original_participants);
@@ -520,8 +535,8 @@ mod tests {
         let participant_a = crate::PrivateKey::from_seed(100).public_key();
         let participant_b = crate::PrivateKey::from_seed(101).public_key();
 
-        registry.update_registry(3, vec![participant_a.clone()], vec![]);
-        registry.update_registry(7, vec![participant_b.clone()], vec![]);
+        registry.update_registry(3, &[participant_a.clone()], &[]);
+        registry.update_registry(7, &[participant_b.clone()], &[]);
 
         // Test that we get the largest view <= requested view
         // Views available: 0 (2 participants), 3 (3 participants), 7 (4 participants)
@@ -556,7 +571,7 @@ mod tests {
 
         // Add participant at view 2
         let new_participant = crate::PrivateKey::from_seed(100).public_key();
-        registry.update_registry(2, vec![new_participant.clone()], vec![]);
+        registry.update_registry(2, &[new_participant.clone()], &[]);
 
         // Leader for view 0-1 should use 4-participant set from view 0
         let leader_0 = Su::leader(&registry, 0);
@@ -591,7 +606,7 @@ mod tests {
 
         // Add participant at view 3
         let new_participant = crate::PrivateKey::from_seed(100).public_key();
-        registry.update_registry(3, vec![new_participant.clone()], vec![]);
+        registry.update_registry(3, &[new_participant.clone()], &[]);
 
         // Original participants should be found in all views
         assert_eq!(
@@ -634,10 +649,10 @@ mod tests {
         let participant_a = crate::PrivateKey::from_seed(100).public_key();
         let participant_b = crate::PrivateKey::from_seed(101).public_key();
 
-        registry.update_registry(5, vec![participant_a], vec![]);
+        registry.update_registry(5, &[participant_a], &[]);
         assert_eq!(registry.peer_set_id(), 5);
 
-        registry.update_registry(10, vec![participant_b], vec![]);
+        registry.update_registry(10, &[participant_b], &[]);
         assert_eq!(registry.peer_set_id(), 10);
     }
 
@@ -648,7 +663,7 @@ mod tests {
         let participant_to_remove = original_participants[1].clone();
 
         // Remove participant at view 2
-        registry.update_registry(2, vec![], vec![participant_to_remove.clone()]);
+        registry.update_registry(2, &[], &[participant_to_remove.clone()]);
 
         // Views 0-1 should still have original participants
         assert_eq!(registry.participants(0).unwrap().len(), 3);
@@ -696,8 +711,8 @@ mod tests {
         // Add two participants and remove one in a single operation
         registry.update_registry(
             1,
-            vec![new_participant_a.clone(), new_participant_b.clone()],
-            vec![participant_to_remove.clone()],
+            &[new_participant_a.clone(), new_participant_b.clone()],
+            &[participant_to_remove.clone()],
         );
 
         // Verify the result