diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 3f60752d1d..aacc367949 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -109,6 +109,8 @@ pub struct NakamotoDownloadStateMachine { tenure_block_ids: HashMap, /// Who can serve a given tenure pub(crate) available_tenures: HashMap>, + /// What is the highest available tenure, if known? + pub(crate) highest_available_tenure: Option, /// Confirmed tenure download schedule pub(crate) tenure_download_schedule: VecDeque, /// Unconfirmed tenure download schedule @@ -140,6 +142,7 @@ impl NakamotoDownloadStateMachine { state: NakamotoDownloadState::Confirmed, tenure_block_ids: HashMap::new(), available_tenures: HashMap::new(), + highest_available_tenure: None, tenure_download_schedule: VecDeque::new(), unconfirmed_tenure_download_schedule: VecDeque::new(), tenure_downloads: NakamotoTenureDownloaderSet::new(), @@ -862,6 +865,14 @@ impl NakamotoDownloadStateMachine { self.tenure_download_schedule = schedule; self.tenure_block_ids = tenure_block_ids; self.available_tenures = available; + + let highest_available_tenure = self.find_highest_available_tenure(); + self.highest_available_tenure = highest_available_tenure; + + test_debug!( + "new highest_available_tenure: {:?}", + &self.highest_available_tenure + ); } /// Update our tenure download state machines, given our download schedule, our peers' tenure @@ -958,14 +969,14 @@ impl NakamotoDownloadStateMachine { return false; } - let (unconfirmed_tenure_opt, confirmed_tenure_opt) = Self::find_unconfirmed_tenure_ids( + let (confirmed_tenure_opt, unconfirmed_tenure_opt) = Self::find_unconfirmed_tenure_ids( wanted_tenures, prev_wanted_tenures, available_tenures, ); debug!( "Check unconfirmed tenures: highest two available tenures are {:?}, {:?}", - &unconfirmed_tenure_opt, &confirmed_tenure_opt 
+ &confirmed_tenure_opt, &unconfirmed_tenure_opt ); // see if we need any tenures still @@ -980,11 +991,11 @@ impl NakamotoDownloadStateMachine { }); if !is_available_and_processed { - let is_unconfirmed = unconfirmed_tenure_opt + let is_unconfirmed = confirmed_tenure_opt .as_ref() .map(|ch| *ch == wt.tenure_id_consensus_hash) .unwrap_or(false) - || confirmed_tenure_opt + || unconfirmed_tenure_opt .as_ref() .map(|ch| *ch == wt.tenure_id_consensus_hash) .unwrap_or(false); @@ -1549,6 +1560,24 @@ impl NakamotoDownloadStateMachine { } } + /// Find the highest available tenure ID. + /// Returns Some(consensus_hash) for the highest tenure available from at least one node. + /// Returns None if no tenures are available from any peer. + fn find_highest_available_tenure(&self) -> Option { + let (t1, t2) = Self::find_unconfirmed_tenure_ids( + &self.wanted_tenures, + self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), + &self.available_tenures, + ); + if let Some(ch) = t2 { + return Some(ch); + } else if let Some(ch) = t1 { + return Some(ch); + } else { + return None; + } + } + /// Go and get tenures. Returns list of blocks per tenure, identified by consensus hash. /// The blocks will be sorted by height, but may not be contiguous. pub fn run( diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index d73342164e..2f291bfb30 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -113,9 +113,11 @@ pub struct NakamotoTenureDownloaderSet { /// The set of tenures that have been successfully downloaded (but possibly not yet stored or /// processed) pub(crate) completed_tenures: HashSet, - /// Number of times a tenure download was attempted + /// Number of times a tenure download was attempted. 
This counter is incremented before the + /// downloader starts pub(crate) attempted_tenures: HashMap, - /// Number of times a tenure download failed + /// Number of times a tenure download failed. This counter is incremented after the downloader + /// finishes in an error state. pub(crate) attempt_failed_tenures: HashMap, /// Peers that should be deprioritized because they're dead (maps to when they can be used /// again) @@ -451,7 +453,13 @@ impl NakamotoTenureDownloaderSet { continue; }; if tenure_info.processed { - // we already have this tenure + // we already have tried to download this tenure, + // but do remove it from `self.completed_tenures` in order to (1) avoid a memory + // leak, and (2) account for the chance that the end-block has changed due to a + // Bitcoin reorg. This way, a subsequent call with the same tenure in `schedule` + // will succeed in starting a downloader. Since `schedule` is derived from on-disk + // state, the only way a "completed" tenure will show up in `schedule` again is if + // it is later determined that the tenure we stored is incomplete or not canonical. 
debug!("Already have processed tenure {ch}"); self.completed_tenures .remove(&CompletedTenure::from(tenure_info)); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 695574769c..96b747e81f 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1469,6 +1469,8 @@ pub const DENY_MIN_BAN_DURATION: u64 = 2; pub struct NetworkResult { /// Stacks chain tip when we began this pass pub stacks_tip: StacksBlockId, + /// Stacks chain tip's tenure ID when we began this pass + pub stacks_tip_tenure_id: ConsensusHash, /// PoX ID as it was when we begin downloading blocks (set if we have downloaded new blocks) pub download_pox_id: Option, /// Network messages we received but did not handle @@ -1519,15 +1521,22 @@ pub struct NetworkResult { pub coinbase_height: u64, /// The observed stacks tip height (different in Nakamoto from coinbase height) pub stacks_tip_height: u64, - /// The consensus hash of the stacks tip (prefixed `rc_` for historical reasons) + /// The consensus hash of the highest complete Stacks tenure at the time the canonical + /// sortition tip was processed. Not guaranteed to be the same across all nodes for the same + /// given sortition tip. + /// + /// TODO: remove this and use canonical Stacks tenure ID instead. 
pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs pub stacker_db_configs: HashMap, + /// Highest available tenure, if known + pub highest_available_tenure: Option, } impl NetworkResult { pub fn new( stacks_tip: StacksBlockId, + stacks_tip_tenure_id: ConsensusHash, num_state_machine_passes: u64, num_inv_sync_passes: u64, num_download_passes: u64, @@ -1537,9 +1546,11 @@ impl NetworkResult { stacks_tip_height: u64, rc_consensus_hash: ConsensusHash, stacker_db_configs: HashMap, + highest_available_tenure: Option, ) -> NetworkResult { NetworkResult { stacks_tip, + stacks_tip_tenure_id, unhandled_messages: HashMap::new(), download_pox_id: None, blocks: vec![], @@ -1567,6 +1578,7 @@ impl NetworkResult { stacks_tip_height, rc_consensus_hash, stacker_db_configs, + highest_available_tenure, } } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index fd8561326a..7288615412 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5187,7 +5187,7 @@ impl PeerNetwork { poll_timeout: u64, handler_args: &RPCHandlerArgs, ) -> Result { - debug!(">>>>>>>>>>>>>>>>>>>>>>> Begin Network Dispatch (poll for {}) >>>>>>>>>>>>>>>>>>>>>>>>>>>>", poll_timeout); + debug!(">>>>>>>>>>>>>>>>>>>>>>> Begin Network Dispatch (poll for {}, ibd={}) >>>>>>>>>>>>>>>>>>>>>>>>>>>>", poll_timeout, ibd); let mut poll_states = match self.network { None => { debug!("{:?}: network not connected", &self.local_peer); @@ -5226,6 +5226,7 @@ impl PeerNetwork { ); let mut network_result = NetworkResult::new( self.stacks_tip.block_id(), + self.stacks_tip.consensus_hash.clone(), self.num_state_machine_passes, self.num_inv_sync_passes, self.num_downloader_passes, @@ -5235,6 +5236,10 @@ impl PeerNetwork { self.stacks_tip.height, self.chain_view.rc_consensus_hash.clone(), self.get_stacker_db_configs_owned(), + self.block_downloader_nakamoto + .as_ref() + .map(|dler| dler.highest_available_tenure.clone()) + .flatten(), ); 
network_result.consume_unsolicited(unsolicited_buffered_messages); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index c4684acf14..8f8f85722c 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -1155,6 +1155,7 @@ fn test_boot_nakamoto_peer() { fn test_network_result_update() { let mut network_result_1 = NetworkResult::new( StacksBlockId([0x11; 32]), + ConsensusHash([0x01; 20]), 1, 1, 1, @@ -1164,10 +1165,12 @@ fn test_network_result_update() { 1, ConsensusHash([0x11; 20]), HashMap::new(), + None, ); let mut network_result_2 = NetworkResult::new( StacksBlockId([0x22; 32]), + ConsensusHash([0x01; 20]), 2, 2, 2, @@ -1177,6 +1180,7 @@ fn test_network_result_update() { 2, ConsensusHash([0x22; 20]), HashMap::new(), + None, ); let nk1 = NeighborKey { @@ -1589,6 +1593,7 @@ fn test_network_result_update() { // stackerdb uploaded chunks get consolidated correctly let mut old = NetworkResult::new( StacksBlockId([0xaa; 32]), + ConsensusHash([0x01; 20]), 10, 10, 10, @@ -1598,6 +1603,7 @@ fn test_network_result_update() { 10, ConsensusHash([0xaa; 20]), HashMap::new(), + None, ); let mut new = old.clone(); @@ -1648,6 +1654,7 @@ fn test_network_result_update() { // stackerdb pushed chunks get consolidated correctly let mut old = NetworkResult::new( StacksBlockId([0xaa; 32]), + ConsensusHash([0x01; 20]), 10, 10, 10, @@ -1657,6 +1664,7 @@ fn test_network_result_update() { 10, ConsensusHash([0xaa; 20]), HashMap::new(), + None, ); let mut new = old.clone(); @@ -1707,6 +1715,7 @@ fn test_network_result_update() { // nakamoto blocks obtained via download, upload, or pushed get consoldated let mut old = NetworkResult::new( StacksBlockId([0xbb; 32]), + ConsensusHash([0x01; 20]), 11, 11, 11, @@ -1716,6 +1725,7 @@ fn test_network_result_update() { 11, ConsensusHash([0xbb; 20]), HashMap::new(), + None, ); old.nakamoto_blocks.insert(nblk1.block_id(), nblk1.clone()); old.pushed_nakamoto_blocks.insert( @@ -1731,6 +1741,7 @@ fn 
test_network_result_update() { let new = NetworkResult::new( StacksBlockId([0xbb; 32]), + ConsensusHash([0x01; 20]), 11, 11, 11, @@ -1740,6 +1751,7 @@ fn test_network_result_update() { 11, ConsensusHash([0xbb; 20]), HashMap::new(), + None, ); let mut new_pushed = new.clone(); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 2729d648eb..d66f0b578f 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -3076,6 +3076,7 @@ fn process_new_blocks_rejects_problematic_asts() { let mut network_result = NetworkResult::new( peer.network.stacks_tip.block_id(), + ConsensusHash([0x01; 20]), 0, 0, 0, @@ -3085,6 +3086,7 @@ fn process_new_blocks_rejects_problematic_asts() { 0, ConsensusHash([0x01; 20]), HashMap::new(), + None, ); network_result.consume_unsolicited(unsolicited); diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index d76c16641c..4719245f63 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2179,6 +2179,7 @@ impl BurnchainController for BitcoinRegtestController { } } + /// NOTE: this is 1-indexed. If there are 10 headers, then this returns 11 fn get_headers_height(&self) -> u64 { let (_, network_id) = self.config.burnchain.get_bitcoin_network(); let spv_client = SpvClient::new( diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index ca96a1f81c..debcffa649 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -129,12 +129,17 @@ impl Globals { } } - /// Does the inventory sync watcher think we still need to - /// catch up to the chain tip? + /// Are we still in the initial block download period? As in, are there more sortitions to + /// process and/or more tenure-start blocks to process? 
pub fn in_initial_block_download(&self) -> bool { self.sync_comms.get_ibd() } + /// Flag whether or not the node is in IBD + pub fn set_initial_block_download(&mut self, ibd: bool) { + self.sync_comms.set_ibd(ibd); + } + /// Get the last sortition processed by the relayer thread pub fn get_last_sortition(&self) -> Option { self.last_sortition diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index c49e0bbc73..a7b41705dd 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -313,14 +313,12 @@ impl StacksNode { /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp /// and inspecting if a sortition was won. - /// `ibd`: boolean indicating whether or not we are in the initial block download /// Called from the main thread. pub fn process_burnchain_state( &mut self, config: &Config, sortdb: &SortitionDB, sort_id: &SortitionId, - ibd: bool, ) -> Result<(), Error> { let ic = sortdb.index_conn(); @@ -374,7 +372,6 @@ impl StacksNode { "burn_height" => block_height, "leader_keys_count" => num_key_registers, "block_commits_count" => num_block_commits, - "in_initial_block_download?" 
=> ibd, ); self.globals.set_last_sortition(block_snapshot.clone()); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 16b33ead7a..11c365e84d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -64,11 +64,12 @@ use crate::run_loop::RegisteredKey; /// Test flag to stall the miner thread pub static TEST_MINE_STALL: LazyLock> = LazyLock::new(TestFlag::default); #[cfg(test)] +/// Test flag to stall the miner from announcing a block +pub static TEST_BLOCK_ANNOUNCE_STALL: LazyLock> = LazyLock::new(TestFlag::default); +#[cfg(test)] /// Test flag to stall block proposal broadcasting pub static TEST_BROADCAST_STALL: LazyLock> = LazyLock::new(TestFlag::default); #[cfg(test)] -pub static TEST_BLOCK_ANNOUNCE_STALL: LazyLock> = LazyLock::new(TestFlag::default); -#[cfg(test)] pub static TEST_SKIP_P2P_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); /// If the miner was interrupted while mining a block, how long should the diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 3c4e6a98f4..9f6ef7ebbc 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -235,7 +235,7 @@ impl PeerThread { fee_estimator: Option<&Box>, ) -> bool { // initial block download? 
- let ibd = self.globals.sync_comms.get_ibd(); + let ibd = self.globals.in_initial_block_download(); let download_backpressure = self .results_with_data .as_ref() diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 2cbc37acff..d4507ebeac 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -400,6 +400,67 @@ impl RelayerThread { || !self.config.miner.wait_for_block_download } + /// Compute and set the global IBD flag from a NetworkResult + pub fn infer_ibd(&mut self, net_result: &NetworkResult) { + let cur_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB"); + + // (remember, get_headers_height() is 1-indexed, so account for that with -1) + let headers_height = self + .bitcoin_controller + .get_headers_height() + .saturating_sub(1); + + // are we still processing sortitions? + let burnchain_ibd = cur_sn.block_height != headers_height; + + // if the highest available tenure is known, then is it the same as the ongoing stacks + // tenure? If so, then we're not IBD. If not, then we're IBD. + // If it is not known, then we're not in IBD. + let stacks_ibd = + if let Some(highest_available_tenure) = net_result.highest_available_tenure.as_ref() { + if *highest_available_tenure != net_result.stacks_tip_tenure_id { + // in IBD if the highest available tenure comes after the stacks tip (not always a + // given, because neighbors may not report all the data we have). 
+ let highest_available_tenure_sn = SortitionDB::get_block_snapshot_consensus( + self.sortdb.conn(), + highest_available_tenure, + ) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: highest available tenure not in sortition DB"); + + let stacks_tip_tenure_sn = SortitionDB::get_block_snapshot_consensus( + self.sortdb.conn(), + &net_result.stacks_tip_tenure_id, + ) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: stacks tip tenure not in sortition DB"); + + highest_available_tenure_sn.block_height > stacks_tip_tenure_sn.block_height + } else { + // they're the same, so not in IBD + false + } + } else { + // we don't know the highest available tenure, so assume that we have it (and thus are + // not in IBD) + false + }; + + debug!("Relayer: set initial block download inference ({})", burnchain_ibd || stacks_ibd; + "burnchain_ibd" => %burnchain_ibd, + "stacks_ibd" => %stacks_ibd, + "highest_available_tenure" => ?net_result.highest_available_tenure, + "stacks_tip_tenure_id" => %net_result.stacks_tip_tenure_id, + "cur_sn.block_height" => cur_sn.block_height, + "burnchain_headers_height" => headers_height); + + // we're in IBD if we're either still processing sortitions, or the highest available + // tenure is different from the highest processed tenure. + self.globals + .set_initial_block_download(burnchain_ibd || stacks_ibd); + } + /// Handle a NetworkResult from the p2p/http state machine. Usually this is the act of /// * preprocessing and storing new blocks and microblocks /// * relaying blocks, microblocks, and transacctions @@ -409,6 +470,7 @@ impl RelayerThread { "Relayer: Handle network result (from {})", net_result.burn_height ); + self.infer_ibd(&net_result); if self.last_network_block_height != net_result.burn_height { // burnchain advanced; disable mining until we also do a download pass. 
@@ -427,7 +489,7 @@ impl RelayerThread { &mut self.sortdb, &mut self.chainstate, &mut self.mempool, - self.globals.sync_comms.get_ibd(), + self.globals.in_initial_block_download(), Some(&self.globals.coord_comms), Some(&self.event_dispatcher), ) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 335fb325d8..3a9135caee 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -447,7 +447,7 @@ impl RunLoop { let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); // set up globals so other subsystems can instantiate off of the runloop state. - let globals = Globals::new( + let mut globals = Globals::new( coordinator_senders, self.get_miner_status(), relay_send, @@ -523,7 +523,15 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); + let sortition_height = + SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) + .map(|snapshot| snapshot.block_height) + .unwrap_or(0); + + let initial_ibd = sortition_height < burnchain.get_headers_height() - 1; + + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}. IBD={initial_ibd}"); + globals.set_initial_block_download(initial_ibd); let mut last_tenure_sortition_height = 0; let mut poll_deadline = 0; @@ -545,18 +553,7 @@ impl RunLoop { } let remote_chain_height = burnchain.get_headers_height() - 1; - - // wait for the p2p state-machine to do at least one pass - debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - - // TODO: for now, we just set initial block download false. - // I think that the sync watchdog probably needs to change a fair bit - // for nakamoto. 
There may be some opportunity to refactor this runloop - // as well (e.g., the `mine_start` should be integrated with the - // watchdog so that there's just one source of truth about ibd), - // but I think all of this can be saved for post-neon work. - let ibd = false; - self.pox_watchdog_comms.set_ibd(ibd); + let ibd = globals.in_initial_block_download(); // calculate burnchain sync percentage let percent: f64 = if remote_chain_height > 0 { @@ -648,7 +645,6 @@ impl RunLoop { self.config(), burnchain.sortdb_mut(), sortition_id, - ibd, ) { // relayer errored, exit. error!("Runloop: Block relayer and miner errored, exiting."; "err" => ?e); @@ -693,7 +689,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height}, ibd={ibd})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -725,6 +721,11 @@ impl RunLoop { globals.raise_initiative("runloop-synced".to_string()); } } + } else { + info!("Runloop: still synchronizing"; + "sortition_db_height" => sortition_db_height, + "burnchain_height" => burnchain_height, + "ibd" => ibd); } } } diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 488234d21d..90f463f232 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -151,9 +151,18 @@ impl PoxSyncWatchdog { ibd } - /// Wait until the next PoX anchor block arrives. - /// We know for a fact that they all exist for Epochs 2.5 and earlier, in both mainnet and - /// testnet. + /// This code path is only used for Epoch 2.5 and earlier. 
+ /// + /// Wait to poll the burnchain for its height, and compute the maximum height up to which we + /// should process sortitions. + /// + /// This code used to be much more elaborate, and would use a set of heuristics to determine + /// whether or not there could be an outstanding PoX anchor block to try waiting for before + /// attempting to process sortitions without it. However, we now know for a fact that in epoch + /// 2.5 and earlier, in both mainnet and testnet, there are no missing anchor blocks, so this + /// code instead just sleeps for `[burnchain].poll_time_secs` and computes the burn block height of + /// the start of the first reward cycle for which we don't yet have an anchor block. + /// /// Return (still-in-ibd?, maximum-burnchain-sync-height) on success. pub fn pox_sync_wait( &mut self, @@ -186,7 +195,6 @@ impl PoxSyncWatchdog { .max(burnchain_height) }; - self.relayer_comms.set_ibd(ibbd); if !self.unconditionally_download { self.relayer_comms .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3070b6610c..df2ad9ceb6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3818,6 +3818,7 @@ fn follower_bootup_simple() { thread::sleep(Duration::from_millis(100)); continue; }; + assert!(info.is_fully_synced, "{:?}", &info); let Ok(follower_info) = get_chain_info_result(&follower_conf) else { debug!("follower_bootup: Could not get follower chain info"); @@ -3827,6 +3828,7 @@ fn follower_bootup_simple() { if follower_info.burn_block_height < info.burn_block_height { debug!("follower_bootup: Follower is behind miner's burnchain view"); + assert!(!follower_info.is_fully_synced, "{:?}", &follower_info); thread::sleep(Duration::from_millis(100)); continue; } @@ -3860,6 +3862,7 @@ fn follower_bootup_simple() { 
thread::sleep(Duration::from_millis(100)); continue; }; + assert!(info.is_fully_synced, "{:?}", &info); let Ok(follower_info) = get_chain_info_result(&follower_conf) else { debug!("follower_bootup: Could not get follower chain info"); @@ -3871,11 +3874,13 @@ fn follower_bootup_simple() { "follower_bootup: Follower has advanced to miner's tip {}", &info.stacks_tip ); + assert!(follower_info.is_fully_synced, "{:?}", &follower_info); } else { debug!( "follower_bootup: Follower has NOT advanced to miner's tip: {} != {}", &info.stacks_tip, follower_info.stacks_tip ); + assert!(!follower_info.is_fully_synced, "{:?}", &follower_info); } last_tip = info.stacks_tip; @@ -3923,8 +3928,10 @@ fn follower_bootup_simple() { if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash && follower_node_info.stacks_tip == tip.anchored_header.block_hash() { + assert!(follower_node_info.is_fully_synced); break; } + assert!(!follower_node_info.is_fully_synced); } coord_channel @@ -5261,6 +5268,7 @@ fn forked_tenure_is_ignored() { next_block_and(&mut btc_regtest_controller, 60, || { test_skip_commit_op.set(false); TEST_BLOCK_ANNOUNCE_STALL.set(false); + let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); let blocks_processed = coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index bc30a105d5..1b858f28dd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -954,6 +954,7 @@ fn forked_tenure_testing( |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; + // don't allow signers to post signed blocks (limits the amount of fault injection we // need) TEST_SKIP_BLOCK_BROADCAST.set(true); @@ -1097,14 +1098,14 @@ fn forked_tenure_testing( ); assert_ne!(tip_b, tip_a); + info!("Starting Tenure C."); if 
!expect_tenure_c { + info!("Process Tenure B"); // allow B to process, so it'll be distinct from C TEST_BLOCK_ANNOUNCE_STALL.set(false); sleep_ms(1000); } - info!("Starting Tenure C."); - // Submit a block commit op for tenure C let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = if expect_tenure_c { @@ -1124,7 +1125,6 @@ fn forked_tenure_testing( || { let commits_count = commits_submitted.load(Ordering::SeqCst); if commits_count > commits_before { - // now allow block B to process if it hasn't already. TEST_BLOCK_ANNOUNCE_STALL.set(false); } let rejected_count = rejected_blocks.load(Ordering::SeqCst); @@ -1174,6 +1174,8 @@ fn forked_tenure_testing( panic!(); }); + coord_channel.lock().unwrap().announce_new_stacks_block(); + // allow blocks B and C to be processed sleep_ms(1000);