fix/5274: add test coverage for microforks #5280

Open · wants to merge 2 commits into base: develop
18 changes: 10 additions & 8 deletions stackslib/src/chainstate/nakamoto/mod.rs
@@ -1805,13 +1805,6 @@ impl NakamotoChainState {
)
});

debug!("Process staging Nakamoto block";
"consensus_hash" => %next_ready_block.header.consensus_hash,
"stacks_block_hash" => %next_ready_block.header.block_hash(),
"stacks_block_id" => %next_ready_block.header.block_id(),
"burn_block_hash" => %next_ready_block_snapshot.burn_header_hash
);

let elected_height = sort_db
.get_consensus_hash_height(&next_ready_block.header.consensus_hash)?
.ok_or_else(|| ChainstateError::NoSuchBlockError)?;
@@ -1878,6 +1871,14 @@ impl NakamotoChainState {
return Err(ChainstateError::InvalidStacksBlock(msg.into()));
}

debug!("Process staging Nakamoto block";
"consensus_hash" => %next_ready_block.header.consensus_hash,
"stacks_block_hash" => %next_ready_block.header.block_hash(),
"stacks_block_id" => %next_ready_block.header.block_id(),
"burn_block_hash" => %next_ready_block_snapshot.burn_header_hash,
"parent_block_id" => %next_ready_block.header.parent_block_id,
);
Comment on lines +1874 to +1880 (Contributor):

It's helpful having this appear earlier (where it was before), because there's a bunch of try macros between the original location and where it's been moved to. Those try macros will materialize a warn log in an invoking function, but there would be no logs indicating which block the node was considering processing when the error occurred.
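To illustrate the concern, here's a minimal, hypothetical sketch (illustrative names only, not this PR's code). Each `?` can return early with an error; if the debug log only runs after them, a failure in any step leaves no record of which block was being processed:

```rust
// Hypothetical sketch of the reviewer's point, not the PR's actual code.
fn process_staging_block(block_id: &str) -> Result<(), String> {
    let height = lookup_height(block_id)?; // early return on Err
    let snapshot = lookup_snapshot(block_id)?; // early return on Err
    // Only reached if every `?` above succeeded:
    println!("Process staging block {block_id} at height {height} ({snapshot})");
    Ok(())
}

fn lookup_height(_: &str) -> Result<u64, String> {
    Err("no such block".into()) // the caller sees only this generic error
}

fn lookup_snapshot(_: &str) -> Result<String, String> {
    Ok("snapshot".into())
}

fn main() {
    // Prints only the generic error -- nothing identifies the block:
    if let Err(e) = process_staging_block("abc123") {
        eprintln!("warn: {e}");
    }
}
```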


// set the sortition handle's pointer to the block's burnchain view.
// this is either:
// (1) set by the tenure change tx if one exists
@@ -4061,7 +4062,8 @@ impl NakamotoChainState {
warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block";
"parent_consensus_hash" => %parent_ch,
"parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(),
"block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id()
"block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id(),
"parent_tip" => %parent_block_id,
);

return Err(ChainstateError::NoSuchBlockError);
41 changes: 41 additions & 0 deletions stackslib/src/chainstate/nakamoto/staging_blocks.rs
@@ -332,6 +332,47 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> {
)))
}

/// Get a Nakamoto tenure, starting at the given index block hash.
/// Item 0 in the block list is the tenure-start block.
/// The last item is the block given by the index block hash.
#[cfg(any(test, feature = "testing"))]
pub fn load_nakamoto_tenure(
&self,
tip: &StacksBlockId,
) -> Result<Option<Vec<NakamotoBlock>>, ChainstateError> {
let Some((block, ..)) = self.get_nakamoto_block(tip)? else {
return Ok(None);
};
if block.is_wellformed_tenure_start_block().map_err(|_| {
ChainstateError::InvalidStacksBlock("Malformed tenure-start block".into())
})? {
// we're done
return Ok(Some(vec![block]));
}

// this is an intermediate block
let mut tenure = vec![];
let mut cursor = block.header.parent_block_id.clone();
tenure.push(block);
loop {
let Some((block, _)) = self.get_nakamoto_block(&cursor)? else {
return Ok(None);
};

let is_tenure_start = block.is_wellformed_tenure_start_block().map_err(|_| {
ChainstateError::InvalidStacksBlock("Malformed tenure-start block".into())
})?;
cursor = block.header.parent_block_id.clone();
tenure.push(block);

if is_tenure_start {
break;
}
}
tenure.reverse();
Ok(Some(tenure))
}
Comment on lines +335 to +374 (Contributor):

Can this be moved to a test module?
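For what it's worth, one hypothetical way to do that (a sketch, not part of this PR as written) is to gate the helper behind a test-only submodule, so it never compiles into release builds while staying callable from tests:

```rust
// Hypothetical sketch: an inherent impl may live in a child module of the
// same crate, so the method remains callable as conn_ref.load_nakamoto_tenure(...).
#[cfg(any(test, feature = "testing"))]
mod test_helpers {
    use super::*;

    impl<'a> NakamotoStagingBlocksConnRef<'a> {
        pub fn load_nakamoto_tenure(
            &self,
            tip: &StacksBlockId,
        ) -> Result<Option<Vec<NakamotoBlock>>, ChainstateError> {
            // ...body unchanged from the diff above...
            unimplemented!()
        }
    }
}
```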


/// Get the size of a Nakamoto block, given its index block hash
/// Returns Ok(Some(size)) if the block was present
/// Returns Ok(None) if there was no such block
3 changes: 3 additions & 0 deletions stackslib/src/net/mod.rs
@@ -4183,6 +4183,9 @@ pub mod test {
all_blocks: Vec<NakamotoBlock>,
expected_siblings: usize,
) {
if !self.mine_malleablized_blocks {
return;
}
for block in all_blocks.iter() {
let sighash = block.header.signer_signature_hash();
let siblings = self
198 changes: 194 additions & 4 deletions stackslib/src/net/tests/download/nakamoto.rs
@@ -43,8 +43,10 @@ use crate::clarity::vm::types::StacksAddressExtensions;
use crate::net::api::gettenureinfo::RPCGetTenureInfo;
use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *};
use crate::net::inv::nakamoto::NakamotoTenureInv;
use crate::net::test::{dns_thread_start, TestEventObserver};
use crate::net::tests::inv::nakamoto::{make_nakamoto_peer_from_invs, peer_get_nakamoto_invs};
use crate::net::test::{dns_thread_start, to_addr, TestEventObserver};
use crate::net::tests::inv::nakamoto::{
make_nakamoto_peer_from_invs, make_nakamoto_peers_from_invs_ext, peer_get_nakamoto_invs,
};
use crate::net::tests::{NakamotoBootPlan, TestPeer};
use crate::net::{Error as NetError, Hash160, NeighborAddress, SortitionDB};
use crate::stacks_common::types::Address;
@@ -2161,7 +2163,9 @@ fn test_nakamoto_download_run_2_peers() {
"Booting peer's stacks tip is now {:?}",
&boot_peer.network.stacks_tip
);
if stacks_tip_ch == canonical_stacks_tip_ch {
if stacks_tip_ch == canonical_stacks_tip_ch
&& stacks_tip_bhh == canonical_stacks_tip_bhh
{
break;
}
}
@@ -2272,7 +2276,193 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() {
"Booting peer's stacks tip is now {:?}",
&boot_peer.network.stacks_tip
);
if stacks_tip_ch == canonical_stacks_tip_ch {
if stacks_tip_ch == canonical_stacks_tip_ch
&& stacks_tip_bhh == canonical_stacks_tip_bhh
{
break;
}
}

term_sx.send(()).unwrap();
});

loop {
if term_rx.try_recv().is_ok() {
break;
}
peer.step_with_ibd(false).unwrap();
}
});

boot_dns_thread_handle.join().unwrap();
}

/// Test the case where one or more blocks from tenure _T_ get orphaned by a tenure-start block in
/// tenure _T + 1_. The unconfirmed downloader should be able to handle this case.
#[test]
fn test_nakamoto_microfork_download_run_2_peers() {
let sender_key = StacksPrivateKey::new();
let sender_addr = to_addr(&sender_key);
let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)];

let observer = TestEventObserver::new();
let bitvecs = vec![
// full rc
vec![true, true, true, true, true, true, true, true, true, true],
];

let rc_len = 10u64;

let (mut peer, _) = make_nakamoto_peers_from_invs_ext(
function_name!(),
&observer,
bitvecs.clone(),
|boot_plan| {
boot_plan
.with_pox_constants(rc_len as u32, 5)
.with_extra_peers(0)
.with_initial_balances(initial_balances)
.with_malleablized_blocks(false)
},
);
peer.refresh_burnchain_view();

let nakamoto_start =
NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants);

// create a microfork
let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone();
let naka_tip_bh = peer.network.stacks_tip.block_hash.clone();
let naka_tip = peer.network.stacks_tip.block_id();

let sortdb = peer.sortdb_ref().reopen().unwrap();
let (chainstate, _) = peer.chainstate_ref().reopen().unwrap();

let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip)
.unwrap()
.unwrap();

// load the full tenure for this tip
let mut naka_tip_tenure = chainstate
.nakamoto_blocks_db()
.load_nakamoto_tenure(&naka_tip)
.unwrap()
.unwrap();

assert!(naka_tip_tenure.len() > 1);

// make a microfork -- orphan naka_tip_tenure.last()
naka_tip_tenure.pop();

debug!("test: mine off of tenure");
debug!(
"test: first {}: {:?}",
&naka_tip_tenure.first().as_ref().unwrap().block_id(),
&naka_tip_tenure.first().as_ref().unwrap()
);
debug!(
"test: last {}: {:?}",
&naka_tip_tenure.last().as_ref().unwrap().block_id(),
&naka_tip_tenure.last().as_ref().unwrap()
);

peer.mine_nakamoto_on(naka_tip_tenure);
let (fork_naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true);
debug!(
"test: produced fork {}: {:?}",
&fork_naka_block.block_id(),
&fork_naka_block
);

peer.refresh_burnchain_view();

peer.mine_nakamoto_on(vec![fork_naka_block.clone()]);
let (fork_naka_block_2, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true);
debug!(
"test: confirmed fork with {}: {:?}",
&fork_naka_block_2.block_id(),
&fork_naka_block_2
);

peer.refresh_burnchain_view();

// get reward cycle data
let (mut peer, reward_cycle_invs) =
peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);

// make a neighbor from this peer
let boot_observer = TestEventObserver::new();
let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]);
let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer));

let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) =
SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap();

let all_sortitions = peer.sortdb().get_all_snapshots().unwrap();
let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap();
let nakamoto_tip = peer
.sortdb()
.index_handle(&tip.sortition_id)
.get_nakamoto_tip_block_id()
.unwrap()
.unwrap();

assert_eq!(tip.block_height, 53);

// boot up the boot peer's burnchain
for height in 25..tip.block_height {
let ops = peer
.get_burnchain_block_ops_at_height(height + 1)
.unwrap_or(vec![]);
let sn = {
let ih = peer.sortdb().index_handle(&tip.sortition_id);
let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap();
sn
};
test_debug!(
"boot_peer tip height={} hash={}",
sn.block_height,
&sn.burn_header_hash
);
test_debug!("ops = {:?}", &ops);
let block_header = TestPeer::make_next_burnchain_block(
&boot_peer.config.burnchain,
sn.block_height,
&sn.burn_header_hash,
ops.len() as u64,
false,
);
TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone());
}

let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100);

// start running that peer so we can boot off of it
let (term_sx, term_rx) = sync_channel(1);
thread::scope(|s| {
s.spawn(move || {
let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) =
SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn())
.unwrap();
loop {
boot_peer
.run_with_ibd(true, Some(&mut boot_dns_client))
.unwrap();

let (stacks_tip_ch, stacks_tip_bhh) =
SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn())
.unwrap();

last_stacks_tip_ch = stacks_tip_ch;
last_stacks_tip_bhh = stacks_tip_bhh;

debug!(
"Booting peer's stacks tip is now {:?}",
&boot_peer.network.stacks_tip
);
if stacks_tip_ch == canonical_stacks_tip_ch
&& stacks_tip_bhh == canonical_stacks_tip_bhh
{
break;
}
}