diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index 6bd63f11a7..20238b7295 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -123,6 +123,13 @@ jobs:
           - test-name: tests::epoch_24::verify_auto_unlock_behavior
           # Disable this flaky test. We don't need continue testing Epoch 2 -> 3 transition
           - test-name: tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY
+          # These mempool tests take a long time to run, and are meant to be run manually
+          - test-name: tests::nakamoto_integrations::large_mempool_original_constant_fee
+          - test-name: tests::nakamoto_integrations::large_mempool_original_random_fee
+          - test-name: tests::nakamoto_integrations::large_mempool_next_constant_fee
+          - test-name: tests::nakamoto_integrations::large_mempool_next_random_fee
+          - test-name: tests::nakamoto_integrations::larger_mempool
+          - test-name: tests::signer::v0::larger_mempool
     steps:
       ## Setup test environment
diff --git a/stacks-common/src/types/sqlite.rs b/stacks-common/src/types/sqlite.rs
index 183ec61fbc..57010ea118 100644
--- a/stacks-common/src/types/sqlite.rs
+++ b/stacks-common/src/types/sqlite.rs
@@ -16,7 +16,7 @@
 use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef};
 
-use super::chainstate::VRFSeed;
+use super::chainstate::{StacksAddress, VRFSeed};
 use crate::deps_common::bitcoin::util::hash::Sha256dHash;
 use crate::types::chainstate::{
     BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, TrieHash,
@@ -42,6 +42,13 @@ impl ToSql for Sha256dHash {
     }
 }
 
+impl rusqlite::types::ToSql for StacksAddress {
+    fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
+        let addr_str = self.to_string();
+        Ok(addr_str.into())
+    }
+}
+
 // Implement rusqlite traits for a bunch of structs that used to be defined
 // in the chainstate code
 impl_byte_array_rusqlite_only!(ConsensusHash);
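With this `ToSql` impl in place, a `StacksAddress` can be bound directly as a SQL parameter (the nonce code later in this patch relies on this, e.g. `params![addr, nonce]` in `nonce_cache.rs`). A minimal sketch of the call pattern, assuming a `nonces` table like the one used below; the helper function itself is illustrative, not part of the patch:

```rust
use rusqlite::{params, Connection, Result};
use stacks_common::types::chainstate::StacksAddress;

// Hypothetical helper: the address serializes through the new `ToSql` impl
// as its C32 string form, so no explicit `to_string()` is needed here.
fn set_nonce(conn: &Connection, addr: &StacksAddress, nonce: i64) -> Result<usize> {
    conn.execute(
        "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)",
        params![addr, nonce],
    )
}
```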
diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
new file mode 100644
index 0000000000..c108a4deb1
--- /dev/null
+++ b/stacks-common/src/util/lru_cache.rs
@@ -0,0 +1,307 @@
+// Copyright (C) 2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::fmt::Display;
+
+use hashbrown::HashMap;
+
+/// Node in the doubly linked list
+struct Node<K, V> {
+    key: K,
+    value: V,
+    dirty: bool,
+    next: usize,
+    prev: usize,
+}
+
+impl<K: Display, V: Display> Display for Node<K, V> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}={} ({}) [prev={}, next={}]",
+            self.key,
+            self.value,
+            if self.dirty { "dirty" } else { "clean" },
+            self.prev,
+            self.next
+        )
+    }
+}
+
+/// LRU cache for account nonces
+pub struct LruCache<K, V> {
+    capacity: usize,
+    /// Map from address to an offset in the linked list
+    cache: HashMap<K, usize>,
+    /// Doubly linked list of values in order of most recently used
+    order: Vec<Node<K, V>>,
+    /// Index of the head of the linked list -- the most recently used element
+    head: usize,
+    /// Index of the tail of the linked list -- the least recently used element
+    tail: usize,
+}
+
+impl<K: Display, V: Display> Display for LruCache<K, V> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        writeln!(
+            f,
+            "LruCache (capacity={}, head={}, tail={})",
+            self.capacity, self.head, self.tail
+        )?;
+        let mut curr = self.head;
+        while curr != self.capacity {
+            writeln!(f, "  {}", self.order[curr])?;
+            curr = self.order[curr].next;
+        }
+        Ok(())
+    }
+}
+
+impl<K: Eq + std::hash::Hash + Clone, V: Copy> LruCache<K, V> {
+    /// Create a new LRU cache with the given capacity
+    pub fn new(capacity: usize) -> Self {
+        LruCache {
+            capacity,
+            cache: HashMap::new(),
+            order: Vec::with_capacity(capacity),
+            head: capacity,
+            tail: capacity,
+        }
+    }
+
+    /// Get the value for the given key
+    pub fn get(&mut self, key: &K) -> Option<V> {
+        if let Some(node) = self.cache.get(key) {
+            // Move the node to the head of the LRU list
+            let node = *node;
+
+            if node != self.head {
+                let prev = self.order[node].prev;
+                let next = self.order[node].next;
+
+                if node == self.tail {
+                    // If this is the tail, update the tail
+                    self.tail = prev;
+                } else {
+                    // Else, update the next node's prev pointer
+                    self.order[next].prev = prev;
+                }
+
+                self.order[prev].next = next;
+                self.order[node].prev = self.capacity;
+                self.order[node].next = self.head;
+                self.order[self.head].prev = node;
+                self.head = node;
+            }
+
+            Some(self.order[node].value)
+        } else {
+            None
+        }
+    }
+
+    /// Insert a key-value pair into the cache, marking it as dirty.
+    /// Returns `Some((K, V))` if a dirty value was evicted.
+    pub fn insert(&mut self, key: K, value: V) -> Option<(K, V)> {
+        self.insert_with_dirty(key, value, true)
+    }
+
+    /// Insert a key-value pair into the cache, marking it as clean.
+    /// Returns `Some((K, V))` if a dirty value was evicted.
+    pub fn insert_clean(&mut self, key: K, value: V) -> Option<(K, V)> {
+        self.insert_with_dirty(key, value, false)
+    }
+
+    /// Insert a key-value pair into the cache
+    /// Returns `Some((K, V))` if a dirty value was evicted.
+    pub fn insert_with_dirty(&mut self, key: K, value: V, dirty: bool) -> Option<(K, V)> {
+        let mut evicted = None;
+        if let Some(node) = self.cache.get(&key) {
+            // Update the value for the key
+            let node = *node;
+            self.order[node].value = value;
+            self.order[node].dirty = dirty;
+
+            // Just call get to handle updating the LRU list
+            self.get(&key);
+        } else {
+            let index = if self.cache.len() == self.capacity {
+                // Take the place of the least recently used element.
+                // First, remove it from the tail of the LRU list
+                let index = self.tail;
+                let prev = self.order[index].prev;
+                self.order[prev].next = self.capacity;
+                self.tail = prev;
+
+                // Remove it from the cache
+                self.cache.remove(&self.order[index].key);
+
+                // Replace the key with the new key, saving the old key
+                let replaced_key = std::mem::replace(&mut self.order[index].key, key.clone());
+
+                // If it is dirty, save the key-value pair to return
+                if self.order[index].dirty {
+                    evicted = Some((replaced_key, self.order[index].value));
+                }
+
+                // Insert this new value into the cache
+                self.cache.insert(key, index);
+
+                // Update the node with the new key-value pair, inserting it at
+                // the head of the LRU list
+                self.order[index].value = value;
+                self.order[index].dirty = dirty;
+                self.order[index].next = self.head;
+                self.order[index].prev = self.capacity;
+
+                index
+            } else {
+                // Insert a new key-value pair
+                let node = Node {
+                    key: key.clone(),
+                    value,
+                    dirty,
+                    next: self.head,
+                    prev: self.capacity,
+                };
+
+                let index = self.order.len();
+                self.order.push(node);
+                self.cache.insert(key, index);
+
+                index
+            };
+
+            // Put it at the head of the LRU list
+            if self.head != self.capacity {
+                self.order[self.head].prev = index;
+            } else {
+                self.tail = index;
+            }
+
+            self.head = index;
+        }
+        evicted
+    }
+
+    pub fn flush<E>(&mut self, mut f: impl FnMut(&K, V) -> Result<(), E>) -> Result<(), E> {
+        let mut index = self.head;
+        while index != self.capacity {
+            let next = self.order[index].next;
+            if self.order[index].dirty {
+                let value = self.order[index].value;
+                f(&self.order[index].key, value)?;
+                self.order[index].dirty = false;
+            }
+            index = next;
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_lru_cache() {
+        let mut cache = LruCache::new(2);
+
+        cache.insert(1, 1);
+        cache.insert(2, 2);
+        assert_eq!(cache.get(&1), Some(1));
+        cache.insert(3, 3);
+        assert_eq!(cache.get(&2), None);
+        cache.insert(4, 4);
+        assert_eq!(cache.get(&1), None);
+        assert_eq!(cache.get(&3), Some(3));
+        assert_eq!(cache.get(&4), Some(4));
+    }
+
+    #[test]
+    fn test_lru_cache_update() {
+        let mut cache = LruCache::new(2);
+
+        cache.insert(1, 1);
+        cache.insert(2, 2);
+        cache.insert(1, 10);
+        assert_eq!(cache.get(&1), Some(10));
+        cache.insert(3, 3);
+        assert_eq!(cache.get(&2), None);
+        cache.insert(2, 4);
+        assert_eq!(cache.get(&2), Some(4));
+        assert_eq!(cache.get(&3), Some(3));
+    }
+
+    #[test]
+    fn test_lru_cache_evicted() {
+        let mut cache = LruCache::new(2);
+
+        assert!(cache.insert(1, 1).is_none());
+        assert!(cache.insert(2, 2).is_none());
+        let evicted = cache.insert(3, 3).expect("expected an eviction");
+        assert_eq!(evicted, (1, 1));
+    }
+
+    #[test]
+    fn test_lru_cache_flush() {
+        let mut cache = LruCache::new(2);
+
+        cache.insert(1, 1);
+
+        let mut flushed = Vec::new();
+        cache
+            .flush(|k, v| {
+                flushed.push((*k, v));
+                Ok::<(), ()>(())
+            })
+            .unwrap();
+
+        assert_eq!(flushed, vec![(1, 1)]);
+
+        cache.insert(1, 3);
+        cache.insert(2, 2);
+
+        let mut flushed = Vec::new();
+        cache
+            .flush(|k, v| {
+                flushed.push((*k, v));
+                Ok::<(), ()>(())
+            })
+            .unwrap();
+
+        assert_eq!(flushed, vec![(2, 2), (1, 3)]);
+    }
+
+    #[test]
+    fn test_lru_cache_evict_clean() {
+        let mut cache = LruCache::new(2);
+
+        assert!(cache.insert_with_dirty(0, 0, false).is_none());
+        assert!(cache.insert_with_dirty(1, 1, false).is_none());
+        assert!(cache.insert_with_dirty(2, 2, true).is_none());
+        assert!(cache.insert_with_dirty(3, 3, true).is_none());
+
+        let mut flushed = Vec::new();
+        cache
+            .flush(|k, v| {
+                flushed.push((*k, v));
+                Ok::<(), ()>(())
+            })
+            .unwrap();
+
+        assert_eq!(flushed, [(3, 3), (2, 2)]);
+    }
+}
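A quick standalone sketch of the cache's write-back contract (illustration only, not part of the patch): `insert` marks entries dirty, an eviction at capacity hands a still-dirty entry back to the caller for immediate persistence, and `flush` walks whatever remains dirty:

```rust
use stacks_common::util::lru_cache::LruCache;

fn main() {
    let mut cache: LruCache<u32, u64> = LruCache::new(2);

    cache.insert(1, 100); // dirty
    cache.insert(2, 200); // dirty
    // Capacity is 2, so inserting a third entry evicts the least recently
    // used one. It was never flushed, so it comes back for persistence.
    assert_eq!(cache.insert(3, 300), Some((1, 100)));

    // Write the remaining dirty entries back (here, just printing).
    cache
        .flush(|k, v| {
            println!("persist {k} -> {v}");
            Ok::<(), ()>(())
        })
        .unwrap();
}
```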
diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs
index c6ba30f3d2..db80ed51e4 100644
--- a/stacks-common/src/util/mod.rs
+++ b/stacks-common/src/util/mod.rs
@@ -22,6 +22,7 @@ pub mod chunked_encoding;
 #[cfg(feature = "rusqlite")]
 pub mod db;
 pub mod hash;
+pub mod lru_cache;
 pub mod pair;
 pub mod pipe;
 pub mod retry;
diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs
index 88a7ffe849..8fa6762cc0 100644
--- a/stackslib/src/chainstate/stacks/miner.rs
+++ b/stackslib/src/chainstate/stacks/miner.rs
@@ -1246,7 +1246,7 @@ impl<'a> StacksMicroblockBuilder<'a> {
         let deadline = get_epoch_time_ms() + u128::from(self.settings.max_miner_time_ms);
         let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT;
 
-        mem_pool.reset_nonce_cache()?;
+        mem_pool.reset_mempool_caches()?;
 
         let stacks_epoch_id = clarity_tx.get_epoch();
         let block_limit = clarity_tx
             .block_limit()
@@ -2287,13 +2287,10 @@ impl StacksBlockBuilder {
             }
         }
 
-        mempool.reset_nonce_cache()?;
         mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?;
 
         let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT;
         let mut considered = HashSet::new(); // txids of all transactions we looked at
-        let mut mined_origin_nonces: HashMap<StacksAddress, u64> = HashMap::new(); // map addrs of mined transaction origins to the nonces we used
-        let mut mined_sponsor_nonces: HashMap<StacksAddress, u64> = HashMap::new(); // map addrs of mined transaction sponsors to the nonces we used
 
         let mut invalidated_txs = vec![];
         let mut to_drop_and_blacklist = vec![];
@@ -2308,6 +2305,17 @@ impl StacksBlockBuilder {
         let mut loop_result = Ok(());
         while block_limit_hit != BlockLimitFunction::LIMIT_REACHED {
             let mut num_considered = 0;
+
+            // Check if we've been preempted before we attempt mining.
+            // This is important because otherwise, we will add unnecessary
+            // contention on the mempool DB.
+            blocked =
+                (*settings.miner_status.lock().expect("FATAL: mutex poisoned")).is_blocked();
+            if blocked {
+                info!("Miner stopping due to preemption");
+                break;
+            }
+
             let intermediate_result = mempool.iterate_candidates(
                 epoch_tx,
                 &mut tx_events,
@@ -2317,7 +2325,7 @@ impl StacksBlockBuilder {
                     blocked = (*settings.miner_status.lock().expect("FATAL: mutex poisoned"))
                         .is_blocked();
                     if blocked {
-                        debug!("Miner stopping due to preemption");
+                        info!("Miner stopping due to preemption");
                         return Ok(None);
                     }
@@ -2325,16 +2333,20 @@ impl StacksBlockBuilder {
                     let update_estimator = to_consider.update_estimate;
 
                     if block_limit_hit == BlockLimitFunction::LIMIT_REACHED {
+                        info!("Miner stopping due to limit reached");
                         return Ok(None);
                     }
                     let time_now = get_epoch_time_ms();
                     if time_now >= deadline {
-                        debug!("Miner mining time exceeded ({} ms)", max_miner_time_ms);
+                        info!(
+                            "Miner stopping due to mining time exceeded ({} ms)",
+                            max_miner_time_ms
+                        );
                         return Ok(None);
                     }
                     if let Some(time_estimate) = txinfo.metadata.time_estimate_ms {
                         if time_now.saturating_add(time_estimate.into()) > deadline {
-                            debug!("Mining tx would cause us to exceed our deadline, skipping";
+                            info!("Mining tx would cause us to exceed our deadline, skipping";
                                   "txid" => %txinfo.tx.txid(),
                                   "deadline" => deadline,
                                   "now" => time_now,
@@ -2360,40 +2372,6 @@ impl StacksBlockBuilder {
                         ));
                     }
 
-                    if let Some(nonce) = mined_origin_nonces.get(&txinfo.tx.origin_address()) {
-                        if *nonce >= txinfo.tx.get_origin_nonce() {
-                            return Ok(Some(
-                                TransactionResult::skipped(
-                                    &txinfo.tx,
-                                    format!(
-                                        "Bad origin nonce, tx nonce {} versus {}.",
-                                        txinfo.tx.get_origin_nonce(),
-                                        *nonce
-                                    ),
-                                )
-                                .convert_to_event(),
-                            ));
-                        }
-                    }
-                    if let Some(sponsor_addr) = txinfo.tx.sponsor_address() {
-                        if let Some(nonce) = mined_sponsor_nonces.get(&sponsor_addr) {
-                            if let Some(sponsor_nonce) = txinfo.tx.get_sponsor_nonce() {
-                                if *nonce >= sponsor_nonce {
-                                    return Ok(Some(
-                                        TransactionResult::skipped(
-                                            &txinfo.tx,
-                                            format!(
-                                                "Bad sponsor nonce, tx nonce {} versus {}.",
-                                                sponsor_nonce, *nonce
-                                            ),
-                                        )
-                                        .convert_to_event(),
-                                    ));
-                                }
-                            }
-                        }
-                    }
-
                     considered.insert(txinfo.tx.txid());
                     num_considered += 1;
@@ -2445,15 +2423,7 @@ impl StacksBlockBuilder {
                                     "error" => ?e);
                             }
                         }
-                        mined_origin_nonces.insert(
-                            txinfo.tx.origin_address(),
-                            txinfo.tx.get_origin_nonce(),
-                        );
-                        if let (Some(sponsor_addr), Some(sponsor_nonce)) =
-                            (txinfo.tx.sponsor_address(), txinfo.tx.get_sponsor_nonce())
-                        {
-                            mined_sponsor_nonces.insert(sponsor_addr, sponsor_nonce);
-                        }
+
                         if soft_limit_reached {
                             // done mining -- our soft limit execution budget is exceeded.
                             // Make the block from the transactions we did manage to get
@@ -2484,9 +2454,7 @@ impl StacksBlockBuilder {
                         }
                     } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT {
-                        debug!(
-                            "Stop mining anchored block due to limit exceeded"
-                        );
+                        info!("Miner stopping due to limit reached");
                         block_limit_hit = BlockLimitFunction::LIMIT_REACHED;
                         return Ok(None);
                     }
@@ -2652,6 +2620,7 @@ impl StacksBlockBuilder {
             .block_limit()
             .expect("Failed to obtain block limit from miner's block connection");
 
+        mempool.reset_mempool_caches()?;
         let (blocked, tx_events) = match Self::select_and_apply_transactions(
             &mut epoch_tx,
             &mut builder,
diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs
index 2008d8c4cd..67980fbfd0 100644
--- a/stackslib/src/chainstate/stacks/tests/block_construction.rs
+++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs
@@ -30,6 +30,7 @@ use clarity::vm::costs::LimitedCostTracker;
 use clarity::vm::database::ClarityDatabase;
 use clarity::vm::test_util::TEST_BURN_STATE_DB;
 use clarity::vm::types::*;
+use mempool::MemPoolWalkStrategy;
 use rand::seq::SliceRandom;
 use rand::{thread_rng, Rng};
 use rusqlite::params;
@@ -4852,7 +4853,7 @@ fn mempool_walk_test_users_10_rounds_3_cache_size_2000_null_prob_100() {
 fn paramaterized_mempool_walk_test(
     num_users: usize,
     num_rounds: usize,
-    nonce_and_candidate_cache_size: u64,
+    nonce_and_candidate_cache_size: usize,
     consider_no_estimate_tx_prob: u8,
     timeout_ms: u128,
 ) {
@@ -5026,3 +5027,246 @@ fn paramaterized_mempool_walk_test(
         },
     );
 }
+
+#[test]
+/// Test that the mempool walk query ignores old nonces and prefers next possible nonces before higher global fees.
+fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() {
+    let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..7)
+        .map(|_user_index| {
+            let privk = StacksPrivateKey::random();
+            let addr = StacksAddress::from_public_keys(
+                C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
+                &AddressHashMode::SerializeP2PKH,
+                1,
+                &vec![StacksPublicKey::from_private(&privk)],
+            )
+            .unwrap();
+            (privk, addr)
+        })
+        .collect();
+    let accounts: Vec<String> = key_address_pairs
+        .iter()
+        .map(|(_, b)| b.to_string())
+        .collect();
+    let address_0 = accounts[0].to_string();
+    let address_1 = accounts[1].to_string();
+    let address_2 = accounts[2].to_string();
+    let address_3 = accounts[3].to_string();
+    let address_4 = accounts[4].to_string();
+    let address_5 = accounts[5].to_string();
+    let address_6 = accounts[6].to_string();
+
+    let test_name = function_name!();
+    let mut peer_config = TestPeerConfig::new(&test_name, 0, 0);
+    peer_config.initial_balances = vec![];
+    for (privk, addr) in &key_address_pairs {
+        peer_config
+            .initial_balances
+            .push((addr.to_account_principal(), 1000000000));
+    }
+
+    let recipient =
+        StacksAddress::from_string("ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV").unwrap();
+
+    let mut chainstate =
+        instantiate_chainstate_with_balances(false, 0x80000000, &test_name, vec![]);
+    let chainstate_path = chainstate_path(&test_name);
+    let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap();
+    let b_1 = make_block(
+        &mut chainstate,
+        ConsensusHash([0x1; 20]),
+        &(
+            FIRST_BURNCHAIN_CONSENSUS_HASH.clone(),
+            FIRST_STACKS_BLOCK_HASH.clone(),
+        ),
+        1,
+        1,
+    );
+    let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2);
+
+    let mut tx_events = Vec::new();
+
+    // Simulate next possible nonces for **some** addresses. Leave some blank so we can test the case where the nonce cannot be
+    // found in the db table and has to be pulled from the MARF.
+    let mempool_tx = mempool.tx_begin().unwrap();
+    mempool_tx
+        .execute(
+            "INSERT INTO nonces (address, nonce) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?)",
+            params![address_0, 2, address_1, 1, address_2, 6, address_4, 1, address_5, 0],
+        )
+        .unwrap();
+    mempool_tx.commit().unwrap();
+
+    // Test transactions with a wide variety of origin/sponsor configurations and fee rate values. Some transactions do not have a
+    // sponsor, some others do, and some others are sponsored by other sponsors. All will be in flight at the same time.
+    //
+    // tuple shape: (origin_address_index, origin_nonce, sponsor_address_index, sponsor_nonce, fee_rate)
+    let test_vectors = vec![
+        (0, 0, 0, 0, 100.0), // Old origin nonce - ignored
+        (0, 1, 0, 1, 200.0), // Old origin nonce - ignored
+        (0, 2, 0, 2, 300.0),
+        (0, 3, 0, 3, 400.0),
+        (0, 4, 3, 0, 500.0), // Nonce 0 for address 3 is not in the table but will be valid on MARF
+        (1, 0, 1, 0, 400.0), // Old origin nonce - ignored
+        (1, 1, 3, 1, 600.0),
+        (1, 2, 3, 2, 700.0),
+        (1, 3, 3, 3, 800.0),
+        (1, 4, 1, 4, 1200.0),
+        (2, 3, 2, 3, 9000.0), // Old origin nonce - ignored
+        (2, 4, 2, 4, 9000.0), // Old origin nonce - ignored
+        (2, 5, 2, 5, 9000.0), // Old origin nonce - ignored
+        (2, 6, 4, 0, 900.0),  // Old sponsor nonce - ignored
+        (2, 6, 4, 1, 1000.0),
+        (2, 7, 4, 2, 800.0),
+        (2, 8, 2, 8, 1000.0),
+        (2, 9, 3, 5, 1000.0),
+        (2, 10, 3, 6, 1500.0),
+        (3, 4, 3, 4, 100.0),
+        (4, 3, 5, 2, 550.0),
+        (5, 0, 5, 0, 500.0),
+        (5, 1, 5, 1, 500.0),
+        (5, 3, 4, 4, 2000.0),
+        (5, 4, 4, 5, 2000.0),
+        (6, 2, 6, 2, 1000.0), // Address has nonce 0 in MARF - ignored
+    ];
+    for (origin_index, origin_nonce, sponsor_index, sponsor_nonce, fee_rate) in
+        test_vectors.into_iter()
+    {
+        // Create tx, either standard or sponsored
+        let mut tx = if origin_index != sponsor_index {
+            let payload = TransactionPayload::TokenTransfer(
+                recipient.to_account_principal(),
+                1,
+                TokenTransferMemo([0; 34]),
+            );
+            sign_sponsored_singlesig_tx(
+                payload.into(),
+                &key_address_pairs[origin_index].0,
+                &key_address_pairs[sponsor_index].0,
+                origin_nonce,
+                sponsor_nonce,
+                200,
+            )
+        } else {
+            make_user_stacks_transfer(
+                &key_address_pairs[origin_index].0,
+                origin_nonce,
+                200,
+                &recipient.to_account_principal(),
+                1,
+            )
+        };
+
+        let mut mempool_tx = mempool.tx_begin().unwrap();
+
+        let origin_address = tx.origin_address();
+        let sponsor_address = tx.sponsor_address().unwrap_or(origin_address);
+        tx.set_tx_fee(fee_rate as u64);
+        let txid = tx.txid();
+        let tx_bytes = tx.serialize_to_vec();
+        let tx_fee = tx.get_tx_fee();
+        let height = 100;
+        MemPoolDB::try_add_tx(
+            &mut mempool_tx,
+            &mut chainstate,
+            &b_1.0,
+            &b_1.1,
+            true,
+            txid,
+            tx_bytes,
+            tx_fee,
+            height,
+            &origin_address,
+            origin_nonce,
+            &sponsor_address,
+            sponsor_nonce,
+            None,
+        )
+        .unwrap();
+        mempool_tx
+            .execute(
+                "UPDATE mempool SET fee_rate = ? WHERE txid = ?",
+                params![Some(fee_rate), &txid],
+            )
+            .unwrap();
+
+        mempool_tx.commit().unwrap();
+    }
+
+    // Visit transactions using the `NextNonceWithHighestFeeRate` strategy. Keep a record of the order of visits so we can compare
+    // at the end.
+    let mut mempool_settings = MemPoolWalkSettings::default();
+    mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate;
+    let mut considered_txs = vec![];
+    let deadline = get_epoch_time_ms() + 30000;
+    chainstate.with_read_only_clarity_tx(
+        &TEST_BURN_STATE_DB,
+        &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1),
+        |clarity_conn| {
+            loop {
+                if mempool
+                    .iterate_candidates::<_, ChainstateError, _>(
+                        clarity_conn,
+                        &mut tx_events,
+                        mempool_settings.clone(),
+                        |_, available_tx, _| {
+                            considered_txs.push((
+                                available_tx.tx.metadata.origin_address.to_string(),
+                                available_tx.tx.metadata.origin_nonce,
+                                available_tx.tx.metadata.sponsor_address.to_string(),
+                                available_tx.tx.metadata.sponsor_nonce,
+                                available_tx.tx.metadata.tx_fee,
+                            ));
+                            Ok(Some(
+                                // Generate any success result
+                                TransactionResult::success(
+                                    &available_tx.tx.tx,
+                                    StacksTransactionReceipt::from_stx_transfer(
+                                        available_tx.tx.tx.clone(),
+                                        vec![],
+                                        Value::okay(Value::Bool(true)).unwrap(),
+                                        ExecutionCost::ZERO,
+                                    ),
+                                )
+                                .convert_to_event(),
+                            ))
+                        },
+                    )
+                    .unwrap()
+                    .0
+                    == 0
+                {
+                    break;
+                }
+                assert!(get_epoch_time_ms() < deadline, "test timed out");
+            }
+
+            // Expected transaction consideration order, sorted by mineable first (next origin+sponsor nonces, highest fee).
+            // Ignores old and very future nonces.
+            let expected_tx_order = vec![
+                (address_2.clone(), 6, address_4.clone(), 1, 1000), // Round 1
+                (address_5.clone(), 0, address_5.clone(), 0, 500),
+                (address_0.clone(), 2, address_0.clone(), 2, 300),
+                (address_2.clone(), 7, address_4.clone(), 2, 800), // Round 2
+                (address_5.clone(), 1, address_5.clone(), 1, 500),
+                (address_0.clone(), 3, address_0.clone(), 3, 400),
+                (address_2.clone(), 8, address_2.clone(), 8, 1000), // Round 3
+                (address_4.clone(), 3, address_5.clone(), 2, 550),
+                (address_0.clone(), 4, address_3.clone(), 0, 500),
+                (address_5.clone(), 3, address_4.clone(), 4, 2000), // Round 4
+                (address_1.clone(), 1, address_3.clone(), 1, 600),
+                (address_5.clone(), 4, address_4.clone(), 5, 2000), // Round 5
+                (address_1.clone(), 2, address_3.clone(), 2, 700),
+                (address_1.clone(), 3, address_3.clone(), 3, 800), // Round 6
+                (address_1.clone(), 4, address_1.clone(), 4, 1200), // Round 7
+                (address_3.clone(), 4, address_3.clone(), 4, 100),
+                (address_2.clone(), 9, address_3.clone(), 5, 1000), // Round 8
+                (address_2.clone(), 10, address_3.clone(), 6, 1500), // Round 9
+            ];
+            assert_eq!(
+                considered_txs, expected_tx_order,
+                "Mempool should visit transactions in the correct order while ignoring past nonces",
+            );
+        },
+    );
+}
diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs
index f550f8b032..81a143ac11 100644
--- a/stackslib/src/chainstate/stacks/tests/mod.rs
+++ b/stackslib/src/chainstate/stacks/tests/mod.rs
@@ -1363,6 +1363,37 @@ pub fn sign_standard_singlesig_tx(
     tx_signer.get_tx().unwrap()
 }
 
+pub fn sign_sponsored_singlesig_tx(
+    payload: TransactionPayload,
+    origin: &StacksPrivateKey,
+    sponsor: &StacksPrivateKey,
+    origin_nonce: u64,
+    sponsor_nonce: u64,
+    tx_fee: u64,
+) -> StacksTransaction {
+    let mut origin_spending_condition =
+        TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(origin))
+            .expect("Failed to create p2pkh spending condition from public key.");
+    origin_spending_condition.set_nonce(origin_nonce);
+    origin_spending_condition.set_tx_fee(tx_fee);
+    let mut sponsored_spending_condition =
+        TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sponsor))
+            .expect("Failed to create p2pkh spending condition from public key.");
+    sponsored_spending_condition.set_nonce(sponsor_nonce);
+    sponsored_spending_condition.set_tx_fee(tx_fee);
+    let auth = TransactionAuth::Sponsored(origin_spending_condition, sponsored_spending_condition);
+    let mut unsigned_tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload);
+
+    unsigned_tx.chain_id = 0x80000000;
+    unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow;
+
+    let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx);
+    tx_signer.sign_origin(origin).unwrap();
+    tx_signer.sign_sponsor(sponsor).unwrap();
+
+    tx_signer.get_tx().unwrap()
+}
+
 pub fn get_stacks_account(peer: &mut TestPeer, addr: &PrincipalData) -> StacksAccount {
     let account = peer
         .with_db_state(|ref mut sortdb, ref mut chainstate, _, _| {
diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs
index 56a90bafef..5476c677d5 100644
--- a/stackslib/src/config/mod.rs
+++ b/stackslib/src/config/mod.rs
@@ -46,7 +46,7 @@
 use crate::chainstate::stacks::index::storage::TrieHashCalculationMode;
 use crate::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus};
 use crate::chainstate::stacks::MAX_BLOCK_LEN;
 use crate::config::chain_data::MinerStats;
-use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes};
+use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkStrategy, MemPoolWalkTxTypes};
 use crate::core::{
     MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId,
     BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT,
@@ -1093,6 +1093,7 @@ impl Config {
             BlockBuilderSettings {
                 max_miner_time_ms: miner_config.nakamoto_attempt_time_ms,
                 mempool_settings: MemPoolWalkSettings {
+                    strategy: miner_config.mempool_walk_strategy,
                     max_walk_time_ms: miner_config.nakamoto_attempt_time_ms,
                     consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx,
                     nonce_cache_size: miner_config.nonce_cache_size,
@@ -1136,6 +1137,7 @@ impl Config {
                     // second or later attempt to mine a block -- give it some time
                     miner_config.subsequent_attempt_time_ms
                 },
+                strategy: miner_config.mempool_walk_strategy,
                 consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx,
                 nonce_cache_size: miner_config.nonce_cache_size,
                 candidate_retry_cache_size: miner_config.candidate_retry_cache_size,
@@ -2113,6 +2115,8 @@ pub struct MinerConfig {
     pub microblock_attempt_time_ms: u64,
     /// Max time to assemble Nakamoto block
    pub nakamoto_attempt_time_ms: u64,
+    /// Strategy to follow when picking next mempool transactions to consider.
+    pub mempool_walk_strategy: MemPoolWalkStrategy,
     pub probability_pick_no_estimate_tx: u8,
     pub block_reward_recipient: Option<PrincipalData>,
     /// If possible, mine with a p2wpkh address
     pub segwit: bool,
     /// Wait for a downloader pass before mining.
     /// This can only be disabled in testing; it can't be changed in the config file.
     pub wait_for_block_download: bool,
-    pub nonce_cache_size: u64,
-    pub candidate_retry_cache_size: u64,
+    pub nonce_cache_size: usize,
+    pub candidate_retry_cache_size: usize,
     pub unprocessed_block_deadline_secs: u64,
     pub mining_key: Option<Secp256k1PrivateKey>,
     /// Amount of time while mining in nakamoto to wait in between mining interim blocks
@@ -2202,6 +2206,7 @@ impl Default for MinerConfig {
             activated_vrf_key_path: None,
             fast_rampup: false,
             underperform_stop_threshold: None,
+            mempool_walk_strategy: MemPoolWalkStrategy::GlobalFeeRate,
             txs_to_consider: MemPoolWalkTxTypes::all(),
             filter_origins: HashSet::new(),
             max_reorg_depth: 3,
@@ -2596,11 +2601,12 @@ pub struct MinerConfigFile {
     pub subsequent_attempt_time_ms: Option<u64>,
     pub microblock_attempt_time_ms: Option<u64>,
     pub nakamoto_attempt_time_ms: Option<u64>,
+    pub mempool_walk_strategy: Option<String>,
     pub probability_pick_no_estimate_tx: Option<u8>,
     pub block_reward_recipient: Option<String>,
     pub segwit: Option<bool>,
-    pub nonce_cache_size: Option<u64>,
-    pub candidate_retry_cache_size: Option<u64>,
+    pub nonce_cache_size: Option<usize>,
+    pub candidate_retry_cache_size: Option<usize>,
     pub unprocessed_block_deadline_secs: Option<u64>,
     pub mining_key: Option<String>,
     pub wait_on_interim_blocks_ms: Option<u64>,
@@ -2717,6 +2723,9 @@ impl MinerConfigFile {
             activated_vrf_key_path: self.activated_vrf_key_path.clone(),
             fast_rampup: self.fast_rampup.unwrap_or(miner_default_config.fast_rampup),
             underperform_stop_threshold: self.underperform_stop_threshold,
+            mempool_walk_strategy: self.mempool_walk_strategy
+                .map(|s| str::parse(&s).unwrap_or_else(|e| panic!("Could not parse '{s}': {e}")))
+                .unwrap_or(MemPoolWalkStrategy::GlobalFeeRate),
             txs_to_consider: {
                 if let Some(txs_to_consider) = &self.txs_to_consider {
                     txs_to_consider
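The new `[miner] mempool_walk_strategy` config key is parsed through `MemPoolWalkStrategy::from_str` (defined in `mempool.rs` below), falling back to `GlobalFeeRate` when unset. A standalone sketch of the accepted values and fallback behavior, mirroring the `MinerConfigFile` logic above; the import path is an assumption (the crate's library name may differ) and the helper is illustrative:

```rust
// Import path assumed; adjust to the actual stackslib library name.
use blockstack_lib::core::mempool::MemPoolWalkStrategy;

// Illustrative stand-in for the `MinerConfigFile` conversion: parse the
// optional config string, defaulting to `GlobalFeeRate`.
fn parse_strategy(raw: Option<&str>) -> MemPoolWalkStrategy {
    raw.map(|s| {
        s.parse()
            .unwrap_or_else(|e| panic!("Could not parse '{s}': {e}"))
    })
    .unwrap_or(MemPoolWalkStrategy::GlobalFeeRate)
}

fn main() {
    // e.g. `mempool_walk_strategy = "NextNonceWithHighestFeeRate"` in the TOML [miner] section
    assert_eq!(
        parse_strategy(Some("NextNonceWithHighestFeeRate")),
        MemPoolWalkStrategy::NextNonceWithHighestFeeRate
    );
    assert_eq!(parse_strategy(None), MemPoolWalkStrategy::GlobalFeeRate);
}
```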
diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs
index d21f46c3c1..06bdcd9eb8 100644
--- a/stackslib/src/core/mempool.rs
+++ b/stackslib/src/core/mempool.rs
@@ -15,21 +15,23 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 use std::cmp::{self, Ordering};
-use std::collections::{HashMap, HashSet, VecDeque};
+use std::collections::{HashMap, HashSet, LinkedList, VecDeque};
 use std::hash::Hasher;
 use std::io::{Read, Write};
 use std::ops::{Deref, DerefMut};
 use std::path::{Path, PathBuf};
 use std::str::FromStr;
 use std::time::{Duration, Instant, SystemTime};
-use std::{fs, io};
+use std::{fs, io, thread};
 
 use clarity::vm::types::PrincipalData;
 use rand::distributions::Uniform;
 use rand::prelude::Distribution;
+use rand::Rng;
 use rusqlite::types::ToSql;
 use rusqlite::{
-    params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction,
+    params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Statement,
+    Transaction,
 };
 use siphasher::sip::SipHasher; // this is SipHash-2-4
 use stacks_common::codec::{
@@ -55,6 +57,7 @@ use crate::chainstate::stacks::{
     Error as ChainstateError, StacksBlock, StacksMicroblock, StacksTransaction, TransactionPayload,
 };
 use crate::clarity_vm::clarity::ClarityConnection;
+use crate::core::nonce_cache::NonceCache;
 use crate::core::{
     ExecutionCost, StacksEpochId, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
 };
@@ -99,6 +102,9 @@ pub const DEFAULT_BLACKLIST_MAX_SIZE: u64 = 134217728; // 2**27 -- the blacklist
 // loading the bloom filter, even though the bloom filter is larger.
 const DEFAULT_MAX_TX_TAGS: u32 = 2048;
 
+// maximum number of transactions that can fit in a single block
+const MAX_BLOCK_TXS: usize = 11_650;
+
 /// A node-specific transaction tag -- the first 8 bytes of siphash(local-seed,txid)
 #[derive(Debug, Clone, PartialEq, Hash, Eq)]
 pub struct TxTag(pub [u8; 8]);
@@ -144,8 +150,11 @@ pub enum MemPoolSyncData {
     TxTags([u8; 32], Vec<TxTag>),
 }
 
+#[derive(Debug, PartialEq)]
 pub enum MempoolIterationStopReason {
+    /// No more candidates in the mempool to consider
     NoMoreCandidates,
+    /// The mining deadline has been reached
     DeadlineReached,
     /// If the iteration function supplied to mempool iteration exited
     /// (i.e., the transaction evaluator returned an early exit command)
@@ -514,20 +523,46 @@ impl MemPoolWalkTxTypes {
     }
 }
 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum MemPoolWalkStrategy {
+    /// Select transactions with the highest global fee rate.
+    GlobalFeeRate,
+    /// Select transactions with the next expected nonce for origin and sponsor addresses.
+    NextNonceWithHighestFeeRate,
+}
+
+impl FromStr for MemPoolWalkStrategy {
+    type Err = &'static str;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "GlobalFeeRate" => {
+                return Ok(Self::GlobalFeeRate);
+            }
+            "NextNonceWithHighestFeeRate" => {
+                return Ok(Self::NextNonceWithHighestFeeRate);
+            }
+            _ => {
+                return Err("Unknown mempool walk strategy");
+            }
+        }
+    }
+}
+
 #[derive(Debug, Clone)]
 pub struct MemPoolWalkSettings {
+    /// Strategy to use when selecting the next transactions to consider in the `mempool` table.
+    pub strategy: MemPoolWalkStrategy,
     /// Maximum amount of time a miner will spend walking through mempool transactions, in
     /// milliseconds. This is a soft deadline.
     pub max_walk_time_ms: u64,
     /// Probability percentage to consider a transaction which has not received a cost estimate.
-    /// That is, with x%, when picking the next transaction to include a block, select one that
-    /// either failed to get a cost estimate or has not been estimated yet.
+    /// Only used when walk strategy is `GlobalFeeRate`.
     pub consider_no_estimate_tx_prob: u8,
     /// Size of the nonce cache. This avoids MARF look-ups.
-    pub nonce_cache_size: u64,
+    pub nonce_cache_size: usize,
    /// Size of the candidate cache. These are the candidates that will be retried after each
     /// transaction is mined.
-    pub candidate_retry_cache_size: u64,
+    pub candidate_retry_cache_size: usize,
     /// Types of transactions we'll consider
     pub txs_to_consider: HashSet<MemPoolWalkTxTypes>,
     /// Origins for transactions that we'll consider
@@ -540,6 +575,7 @@ pub struct MemPoolWalkSettings {
 impl Default for MemPoolWalkSettings {
     fn default() -> Self {
         MemPoolWalkSettings {
+            strategy: MemPoolWalkStrategy::GlobalFeeRate,
             max_walk_time_ms: u64::MAX,
             consider_no_estimate_tx_prob: 5,
             nonce_cache_size: 1024 * 1024,
@@ -553,6 +589,7 @@ impl MemPoolWalkSettings {
     pub fn zero() -> MemPoolWalkSettings {
         MemPoolWalkSettings {
+            strategy: MemPoolWalkStrategy::GlobalFeeRate,
             max_walk_time_ms: u64::MAX,
             consider_no_estimate_tx_prob: 5,
             nonce_cache_size: 1024 * 1024,
@@ -803,6 +840,29 @@ const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &[&str] = &[
     "#,
 ];
 
+const MEMPOOL_SCHEMA_8_NONCE_SORTING: &'static [&'static str] = &[
+    r#"
+    -- Add table to track considered transactions
+    CREATE TABLE IF NOT EXISTS considered_txs(
+        txid TEXT PRIMARY KEY NOT NULL,
+        FOREIGN KEY(txid) REFERENCES mempool(txid) ON DELETE CASCADE
+    );
+    "#,
+    r#"
+    -- Drop redundant mempool indexes, covered by unique constraints
+    DROP INDEX IF EXISTS "by_txid";
+    DROP INDEX IF EXISTS "by_sponsor";
+    DROP INDEX IF EXISTS "by_origin";
+    "#,
+    r#"
+    -- Add indexes for nonce sorting
+    CREATE INDEX IF NOT EXISTS by_address_nonce ON nonces(address, nonce);
+    "#,
+    r#"
+    INSERT INTO schema_version (version) VALUES (8)
+    "#,
+];
+
 const MEMPOOL_INDEXES: &[&str] = &[
     "CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);",
     "CREATE INDEX IF NOT EXISTS by_height ON mempool(height);",
@@ -997,126 +1057,7 @@ impl<'a> MemPoolTx<'a> {
     }
 }
 
-/// Used to locally cache nonces to avoid repeatedly looking them up in the nonce.
-struct NonceCache {
-    cache: HashMap<StacksAddress, u64>,
-    /// The maximum size that this cache can be.
-    max_cache_size: usize,
-}
-
-impl NonceCache {
-    fn new(nonce_cache_size: u64) -> Self {
-        let max_size: usize = nonce_cache_size
-            .try_into()
-            .expect("Could not cast `nonce_cache_size` as `usize`.");
-        Self {
-            cache: HashMap::new(),
-            max_cache_size: max_size,
-        }
-    }
-
-    /// Get a nonce from the cache.
-    /// First, the RAM cache will be checked for this address.
-    /// If absent, then the `nonces` table will be queried for this address.
-    /// If absent, then the MARF will be queried for this address.
-    ///
-    /// If not in RAM, the nonce will be opportunistically stored to the `nonces` table. If that
-    /// fails due to lock contention, then the method will return `true` for its second tuple argument.
-    ///
-    /// Returns (nonce, should-try-store-again?)
-    fn get<C>(
-        &mut self,
-        address: &StacksAddress,
-        clarity_tx: &mut C,
-        mempool_db: &DBConn,
-    ) -> (u64, bool)
-    where
-        C: ClarityConnection,
-    {
-        #[cfg(test)]
-        assert!(self.cache.len() <= self.max_cache_size);
-
-        // Check in-memory cache
-        match self.cache.get(address) {
-            Some(nonce) => (*nonce, false),
-            None => {
-                // Check sqlite cache
-                let opt_nonce = match db_get_nonce(mempool_db, address) {
-                    Ok(opt_nonce) => opt_nonce,
-                    Err(e) => {
-                        warn!("error retrieving nonce from mempool db: {}", e);
-                        None
-                    }
-                };
-                match opt_nonce {
-                    Some(nonce) => {
-                        // Copy this into the in-memory cache if there is space
-                        if self.cache.len() < self.max_cache_size {
-                            self.cache.insert(address.clone(), nonce);
-                        }
-                        (nonce, false)
-                    }
-                    None => {
-                        let nonce =
-                            StacksChainState::get_nonce(clarity_tx, &address.clone().into());
-
-                        let should_store_again = match db_set_nonce(mempool_db, address, nonce) {
-                            Ok(_) => false,
-                            Err(e) => {
-                                debug!("error caching nonce to sqlite: {}", e);
-                                true
-                            }
-                        };
-
-                        if self.cache.len() < self.max_cache_size {
-                            self.cache.insert(address.clone(), nonce);
-                        }
-                        (nonce, should_store_again)
-                    }
-                }
-            }
-        }
-    }
-
-    /// Store the (address, nonce) pair to the `nonces` table.
-    /// If storage fails, return false.
-    /// Otherwise return true.
-    fn update(&mut self, address: StacksAddress, value: u64, mempool_db: &DBConn) -> bool {
-        // Sqlite cache
-        let success = match db_set_nonce(mempool_db, &address, value) {
-            Ok(_) => true,
-            Err(e) => {
-                warn!("error caching nonce to sqlite: {}", e);
-                false
-            }
-        };
-
-        // In-memory cache
-        if let Some(nonce) = self.cache.get_mut(&address) {
-            *nonce = value;
-        }
-
-        success
-    }
-}
-
-fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<(), db_error> {
-    let addr_str = address.to_string();
-    let nonce_i64 = u64_to_sql(nonce)?;
-
-    let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)";
-    conn.execute(sql, params![addr_str, nonce_i64])?;
-    Ok(())
-}
-
-fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result<Option<u64>, db_error> {
-    let addr_str = address.to_string();
-
-    let sql = "SELECT nonce FROM nonces WHERE address = ?";
-    query_row(conn, sql, params![addr_str])
-}
-
-#[cfg(test)]
+#[cfg(any(test, feature = "testing"))]
 pub fn db_get_all_nonces(conn: &DBConn) -> Result<Vec<(StacksAddress, u64)>, db_error> {
     let sql = "SELECT * FROM nonces";
     let mut stmt = conn.prepare(sql).map_err(db_error::SqliteError)?;
@@ -1143,7 +1084,7 @@ struct CandidateCache {
 }
 
 impl CandidateCache {
-    fn new(candidate_retry_cache_size: u64) -> Self {
+    fn new(candidate_retry_cache_size: usize) -> Self {
         let max_size: usize = candidate_retry_cache_size
             .try_into()
             .expect("Could not cast `candidate_retry_cache_size` as usize.");
@@ -1291,6 +1232,9 @@ impl MemPoolDB {
                     MemPoolDB::instantiate_schema_7(tx)?;
                 }
                 7 => {
+                    MemPoolDB::instantiate_schema_8(tx)?;
+                }
+                8 => {
                     break;
                 }
                 _ => {
@@ -1377,6 +1321,16 @@ impl MemPoolDB {
         Ok(())
     }
 
+    /// Optimize indexes for mempool visits
+    #[cfg_attr(test, mutants::skip)]
+    fn instantiate_schema_8(tx: &DBTx) -> Result<(), db_error> {
+        for sql_exec in MEMPOOL_SCHEMA_8_NONCE_SORTING {
+            tx.execute_batch(sql_exec)?;
+        }
+
+        Ok(())
+    }
+
     #[cfg_attr(test, mutants::skip)]
     pub fn db_path(chainstate_root_path: &str) -> Result<String, db_error> {
         let mut path = PathBuf::from(chainstate_root_path);
@@ -1486,10 +1440,12 @@ impl MemPoolDB {
     }
 
     #[cfg_attr(test, mutants::skip)]
-    pub fn reset_nonce_cache(&mut self) -> Result<(), db_error> {
+    pub fn reset_mempool_caches(&mut self) -> Result<(), db_error> {
         debug!("reset nonce cache");
-        let sql = "DELETE FROM nonces";
-        self.db.execute(sql, NO_PARAMS)?;
+        // Delete all rows from the nonces table
+        self.db.execute("DELETE FROM nonces", NO_PARAMS)?;
+        // Also delete all rows from the considered_txs table
+        self.db.execute("DELETE FROM considered_txs", NO_PARAMS)?;
         Ok(())
     }
@@ -1626,28 +1582,29 @@ impl MemPoolDB {
     {
         let start_time = Instant::now();
         let mut total_considered = 0;
+        let mut considered_txs = Vec::with_capacity(MAX_BLOCK_TXS);
 
         debug!("Mempool walk for {}ms", settings.max_walk_time_ms,);
 
+        let mut nonce_cache = NonceCache::new(settings.nonce_cache_size);
+        let mut nonce_conn = self.reopen(true)?;
+
+        // == Queries for `GlobalFeeRate` mempool walk strategy
+        //
+        // Selects mempool transactions only based on their fee rate. Transactions with NULL fee rates get randomly selected for
+        // consideration.
         let tx_consideration_sampler = Uniform::new(0, 100);
         let mut rng = rand::thread_rng();
         let mut candidate_cache = CandidateCache::new(settings.candidate_retry_cache_size);
-        let mut nonce_cache = NonceCache::new(settings.nonce_cache_size);
-
-        // set of (address, nonce) to store after the inner loop completes. This will be done in a
-        // single transaction. This cannot grow to more than `settings.nonce_cache_size` entries.
-        let mut retry_store = HashMap::new();
-
         let sql = "
-        SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate
-        FROM mempool
-        WHERE fee_rate IS NULL
-        ";
+            SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate
+            FROM mempool
+            WHERE fee_rate IS NULL
+            ";
         let mut query_stmt_null = self.db.prepare(sql).map_err(Error::SqliteError)?;
         let mut null_iterator = query_stmt_null
             .query(NO_PARAMS)
             .map_err(Error::SqliteError)?;
-
         let sql = "
         SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate
         FROM mempool
@@ -1660,172 +1617,235 @@ impl MemPoolDB {
             .map_err(Error::SqliteError)?;
 
         let stop_reason = loop {
-            if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 {
-                debug!("Mempool iteration deadline exceeded";
-                       "deadline_ms" => settings.max_walk_time_ms);
-                break MempoolIterationStopReason::DeadlineReached;
-            }
-
-            let start_with_no_estimate =
-                tx_consideration_sampler.sample(&mut rng) < settings.consider_no_estimate_tx_prob;
-
-            // First, try to read from the retry list
-            let (candidate, update_estimate) = match candidate_cache.next() {
-                Some(tx) => {
-                    let update_estimate = tx.fee_rate.is_none();
-                    (tx, update_estimate)
-                }
-                None => {
-                    // When the retry list is empty, read from the mempool db,
-                    // randomly selecting from either the null fee-rate transactions
-                    // or those with fee-rate estimates.
-                    let opt_tx = if start_with_no_estimate {
-                        null_iterator.next().map_err(Error::SqliteError)?
-                    } else {
-                        fee_iterator.next().map_err(Error::SqliteError)?
-                    };
-                    match opt_tx {
-                        Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate),
-                        None => {
-                            // If the selected iterator is empty, check the other
-                            match if start_with_no_estimate {
-                                fee_iterator.next().map_err(Error::SqliteError)?
-                            } else {
-                                null_iterator.next().map_err(Error::SqliteError)?
-                            } {
-                                Some(row) => (
-                                    MemPoolTxInfoPartial::from_row(row)?,
-                                    !start_with_no_estimate,
-                                ),
-                                None => {
-                                    debug!("No more transactions to consider in mempool");
-                                    break MempoolIterationStopReason::NoMoreCandidates;
-                                }
-                            }
-                        }
-                    }
-                }
-            };
-
-            // Check the nonces.
-            let (expected_origin_nonce, retry_store_origin_nonce) =
-                nonce_cache.get(&candidate.origin_address, clarity_tx, self.conn());
-            let (expected_sponsor_nonce, retry_store_sponsor_nonce) =
-                nonce_cache.get(&candidate.sponsor_address, clarity_tx, self.conn());
-
-            // Try storing these nonces later if we failed to do so here, e.g. due to some other
-            // thread holding the write-lock on the mempool DB.
-            if retry_store_origin_nonce {
-                Self::save_nonce_for_retry(
-                    &mut retry_store,
-                    settings.nonce_cache_size,
-                    candidate.origin_address.clone(),
-                    expected_origin_nonce,
-                );
-            }
-            if retry_store_sponsor_nonce {
-                Self::save_nonce_for_retry(
-                    &mut retry_store,
-                    settings.nonce_cache_size,
-                    candidate.sponsor_address.clone(),
-                    expected_sponsor_nonce,
-                );
-            }
-
-            match order_nonces(
-                candidate.origin_nonce,
-                expected_origin_nonce,
-                candidate.sponsor_nonce,
-                expected_sponsor_nonce,
-            ) {
-                Ordering::Less => {
-                    debug!(
-                        "Mempool: unexecutable: drop tx";
-                        "txid" => %candidate.txid,
-                        "tx_origin_addr" => %candidate.origin_address,
-                        "tx_origin_nonce" => candidate.origin_nonce,
-                        "fee_rate" => candidate.fee_rate.unwrap_or_default(),
-                        "expected_origin_nonce" => expected_origin_nonce,
-                        "expected_sponsor_nonce" => expected_sponsor_nonce,
-                    );
-                    // This transaction cannot execute in this pass, just drop it
-                    continue;
-                }
-                Ordering::Greater => {
-                    debug!(
-                        "Mempool: nonces too high, cached for later";
-                        "txid" => %candidate.txid,
-                        "tx_origin_addr" => %candidate.origin_address,
-                        "tx_origin_nonce" => candidate.origin_nonce,
-                        "fee_rate" => candidate.fee_rate.unwrap_or_default(),
-                        "expected_origin_nonce" => expected_origin_nonce,
-                        "expected_sponsor_nonce" => expected_sponsor_nonce,
-                    );
-                    // This transaction could become runnable in this pass, save it for later
-                    candidate_cache.push(candidate);
-                    continue;
-                }
-                Ordering::Equal => {
-                    // Candidate transaction: fall through
-                }
-            };
-
-            // Read in and deserialize the transaction.
-            let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?;
-            let tx_info = match tx_info_option {
-                Some(tx) => tx,
-                None => {
-                    // Note: Don't panic here because maybe the state has changed from garbage collection.
-                    warn!("Miner: could not find a tx for id {:?}", &candidate.txid);
-                    continue;
-                }
-            };
-
-            let (tx_type, do_consider) = match &tx_info.tx.payload {
-                TransactionPayload::TokenTransfer(..) => (
-                    "TokenTransfer".to_string(),
-                    settings
-                        .txs_to_consider
-                        .contains(&MemPoolWalkTxTypes::TokenTransfer),
-                ),
-                TransactionPayload::SmartContract(..) => (
-                    "SmartContract".to_string(),
-                    settings
-                        .txs_to_consider
-                        .contains(&MemPoolWalkTxTypes::SmartContract),
-                ),
-                TransactionPayload::ContractCall(..) => (
-                    "ContractCall".to_string(),
-                    settings
-                        .txs_to_consider
-                        .contains(&MemPoolWalkTxTypes::ContractCall),
-                ),
-                _ => ("".to_string(), true),
-            };
-            if !do_consider {
-                debug!("Will skip mempool tx, since it does not have an acceptable type";
-                       "txid" => %tx_info.tx.txid(), "type" => %tx_type);
-                continue;
-            }
-
-            let do_consider = settings.filter_origins.is_empty()
-                || settings
-                    .filter_origins
-                    .contains(&tx_info.metadata.origin_address);
-
-            if !do_consider {
-                debug!("Will skip mempool tx, since it does not have an allowed origin";
-                       "txid" => %tx_info.tx.txid(), "origin" => %tx_info.metadata.origin_address);
-                continue;
-            }
-
-            let consider = ConsiderTransaction {
-                tx: tx_info,
-                update_estimate,
-            };
-            debug!("Consider mempool transaction";
+            let mut state_changed = false;
+
+            // == Query for `NextNonceWithHighestFeeRate` mempool walk strategy
+            //
+            // Selects the next mempool transaction to consider using a heuristic that maximizes miner fee profitability and minimizes
+            // CPU time wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps:
+            //
+            // 1. Filters out transactions to consider only those that have the next expected nonce for both the origin and sponsor,
+            //    when possible
+            // 2. Adds a "simulated" fee rate to transactions that don't have it by multiplying the mempool's maximum current fee rate
+            //    by a random number. This helps us mix these transactions with others to guarantee they get processed in a reasonable
+            //    order
+            // 3. Ranks transactions by prioritizing those with next nonces and higher fees (per origin and sponsor address)
+            // 4. Takes the top ranked transaction and returns it for evaluation
+            //
+            // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated
+            // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large
+            // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. This query
+            // also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked according to
+            // their origin address nonce.
+            let sql = "
+                WITH nonce_filtered AS (
+                    SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate,
+                        CASE
+                            WHEN fee_rate IS NULL THEN (ABS(RANDOM()) % 10000 / 10000.0) * (SELECT MAX(fee_rate) FROM mempool)
+                            ELSE fee_rate
+                        END AS sort_fee_rate
+                    FROM mempool AS m
+                    LEFT JOIN nonces AS no ON m.origin_address = no.address
+                    LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address
+                    WHERE (no.address IS NULL OR m.origin_nonce = no.nonce)
+                        AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce)
+                        AND m.txid NOT IN (SELECT txid FROM considered_txs)
+                    ORDER BY accept_time ASC
+                    LIMIT 11650 -- max transactions that can fit in one block
+                ),
+                address_nonce_ranked AS (
+                    SELECT *,
+                        ROW_NUMBER() OVER (
+                            PARTITION BY origin_address
+                            ORDER BY origin_nonce ASC, sort_fee_rate DESC
+                        ) AS origin_rank,
+                        ROW_NUMBER() OVER (
+                            PARTITION BY sponsor_address
+                            ORDER BY sponsor_nonce ASC, sort_fee_rate DESC
+                        ) AS sponsor_rank
+                    FROM nonce_filtered
+                )
+                SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate
+                FROM address_nonce_ranked
+                ORDER BY origin_rank ASC, sponsor_rank ASC, sort_fee_rate DESC
+                ";
+            let mut query_stmt_nonce_rank = self.db.prepare(sql).map_err(Error::SqliteError)?;
+            let mut nonce_rank_iterator = query_stmt_nonce_rank
+                .query(NO_PARAMS)
+                .map_err(Error::SqliteError)?;
+
+            let stop_reason = loop {
+                if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 {
+                    debug!("Mempool: iteration deadline exceeded";
+                        "deadline_ms" => settings.max_walk_time_ms);
+                    break MempoolIterationStopReason::DeadlineReached;
+                }
+
+                // Select the next candidate transaction based on the configured walk strategy
+                let (candidate, update_estimate) = match settings.strategy {
+                    MemPoolWalkStrategy::GlobalFeeRate => {
+                        // First, try to read from the retry list
+                        match candidate_cache.next() {
+                            Some(tx) => {
+                                let update_estimate = tx.fee_rate.is_none();
+                                (tx, update_estimate)
+                            }
+                            None => {
+                                // When the retry list is empty, read from the mempool db,
+                                // randomly selecting from either the null fee-rate transactions
+                                // or those with fee-rate estimates.
+                                let start_with_no_estimate = tx_consideration_sampler
+                                    .sample(&mut rng)
+                                    < settings.consider_no_estimate_tx_prob;
+                                let opt_tx = if start_with_no_estimate {
+                                    null_iterator.next().map_err(Error::SqliteError)?
+                                } else {
+                                    fee_iterator.next().map_err(Error::SqliteError)?
+                                };
+                                match opt_tx {
+                                    Some(row) => (
+                                        MemPoolTxInfoPartial::from_row(row)?,
+                                        start_with_no_estimate,
+                                    ),
+                                    None => {
+                                        // If the selected iterator is empty, check the other
+                                        match if start_with_no_estimate {
+                                            fee_iterator.next().map_err(Error::SqliteError)?
+                                        } else {
+                                            null_iterator.next().map_err(Error::SqliteError)?
+                                        } {
+                                            Some(row) => (
+                                                MemPoolTxInfoPartial::from_row(row)?,
+                                                !start_with_no_estimate,
+                                            ),
+                                            None => {
+                                                break MempoolIterationStopReason::NoMoreCandidates;
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    MemPoolWalkStrategy::NextNonceWithHighestFeeRate => {
+                        match nonce_rank_iterator.next().map_err(Error::SqliteError)? {
+                            Some(row) => {
+                                let tx = MemPoolTxInfoPartial::from_row(row)?;
+                                let update_estimate = tx.fee_rate.is_none();
+                                (tx, update_estimate)
+                            }
+                            None => {
+                                break MempoolIterationStopReason::NoMoreCandidates;
+                            }
+                        }
+                    }
+                };
+
+                state_changed = true;
+
+                // Check the nonces.
+                let expected_origin_nonce =
+                    nonce_cache.get(&candidate.origin_address, clarity_tx, &mut nonce_conn);
+                let expected_sponsor_nonce =
+                    nonce_cache.get(&candidate.sponsor_address, clarity_tx, &mut nonce_conn);
+
+                match order_nonces(
+                    candidate.origin_nonce,
+                    expected_origin_nonce,
+                    candidate.sponsor_nonce,
+                    expected_sponsor_nonce,
+                ) {
+                    Ordering::Less => {
+                        debug!(
+                            "Mempool: unexecutable: drop tx";
+                            "txid" => %candidate.txid,
+                            "tx_origin_addr" => %candidate.origin_address,
+                            "tx_origin_nonce" => candidate.origin_nonce,
+                            "fee_rate" => candidate.fee_rate.unwrap_or_default(),
+                            "expected_origin_nonce" => expected_origin_nonce,
+                            "expected_sponsor_nonce" => expected_sponsor_nonce,
+                        );
+                        // This transaction cannot execute in this pass, just drop it
+                        continue;
+                    }
+                    Ordering::Greater => {
+                        debug!(
+                            "Mempool: nonces too high";
+                            "txid" => %candidate.txid,
+                            "tx_origin_addr" => %candidate.origin_address,
+                            "tx_origin_nonce" => candidate.origin_nonce,
+                            "fee_rate" => candidate.fee_rate.unwrap_or_default(),
+                            "expected_origin_nonce" => expected_origin_nonce,
+                            "expected_sponsor_nonce" => expected_sponsor_nonce,
+                        );
+                        if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate {
+                            // This transaction could become runnable in this pass, save it for later
+                            candidate_cache.push(candidate);
+                        }
+                        continue;
+                    }
+                    Ordering::Equal => {
+                        // Candidate transaction: fall through
+                    }
+                };
+                considered_txs.push(candidate.txid);
+
+                // Read in and deserialize the transaction.
+                let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?;
+                let tx_info = match tx_info_option {
+                    Some(tx) => tx,
+                    None => {
+                        // Note: Don't panic here because maybe the state has changed from garbage collection.
+                        warn!("Miner: could not find a tx for id {:?}", &candidate.txid);
+                        continue;
+                    }
+                };
+
+                let (tx_type, do_consider) = match &tx_info.tx.payload {
+                    TransactionPayload::TokenTransfer(..) => (
+                        "TokenTransfer".to_string(),
+                        settings
+                            .txs_to_consider
+                            .contains(&MemPoolWalkTxTypes::TokenTransfer),
+                    ),
+                    TransactionPayload::SmartContract(..) => (
+                        "SmartContract".to_string(),
+                        settings
+                            .txs_to_consider
+                            .contains(&MemPoolWalkTxTypes::SmartContract),
+                    ),
+                    TransactionPayload::ContractCall(..) => (
+                        "ContractCall".to_string(),
+                        settings
+                            .txs_to_consider
+                            .contains(&MemPoolWalkTxTypes::ContractCall),
+                    ),
+                    _ => ("".to_string(), true),
+                };
+                if !do_consider {
+                    debug!("Mempool: will skip tx, since it does not have an acceptable type";
+                           "txid" => %tx_info.tx.txid(), "type" => %tx_type);
+                    continue;
+                }
+
+                let do_consider = settings.filter_origins.is_empty()
+                    || settings
+                        .filter_origins
+                        .contains(&tx_info.metadata.origin_address);
+
+                if !do_consider {
+                    debug!("Mempool: will skip tx, since it does not have an allowed origin";
+                           "txid" => %tx_info.tx.txid(), "origin" => %tx_info.metadata.origin_address);
+                    continue;
+                }
+
+                let consider = ConsiderTransaction {
+                    tx: tx_info,
+                    update_estimate,
+                };
+                debug!("Mempool: consider transaction";
                    "txid" => %consider.tx.tx.txid(),
                    "origin_addr" => %consider.tx.metadata.origin_address,
                    "origin_nonce" => candidate.origin_nonce,
@@ -1835,87 +1855,89 @@ impl MemPoolDB {
                    "sponsor_addr" => %consider.tx.metadata.sponsor_address,
                    "sponsor_nonce" => candidate.sponsor_nonce,
                    "tx_fee" => consider.tx.metadata.tx_fee,
                    "fee_rate" => candidate.fee_rate,
                    "size" => consider.tx.metadata.len);
-            total_considered += 1;
-
-            // Run `todo` on the transaction.
-            match todo(clarity_tx, &consider, self.cost_estimator.as_mut())? {
-                Some(tx_event) => {
-                    match tx_event {
-                        TransactionEvent::Success(_) => {
-                            // Bump nonces in the cache for the executed transaction
-                            let stored = nonce_cache.update(
-                                consider.tx.metadata.origin_address,
-                                expected_origin_nonce + 1,
-                                self.conn(),
-                            );
-                            if !stored {
-                                Self::save_nonce_for_retry(
-                                    &mut retry_store,
-                                    settings.nonce_cache_size,
-                                    consider.tx.metadata.origin_address,
-                                    expected_origin_nonce + 1,
-                                );
-                            }
-
-                            if consider.tx.tx.auth.is_sponsored() {
-                                let stored = nonce_cache.update(
-                                    consider.tx.metadata.sponsor_address,
-                                    expected_sponsor_nonce + 1,
-                                    self.conn(),
-                                );
-                                if !stored {
-                                    Self::save_nonce_for_retry(
-                                        &mut retry_store,
-                                        settings.nonce_cache_size,
-                                        consider.tx.metadata.sponsor_address,
-                                        expected_sponsor_nonce + 1,
-                                    );
-                                }
-                            }
-                            output_events.push(tx_event);
-                        }
-                        TransactionEvent::Skipped(_) => {
-                            // don't push `Skipped` events to the observer
-                        }
-                        _ => {
-                            output_events.push(tx_event);
-                        }
-                    }
-                }
-                None => {
-                    debug!("Mempool iteration early exit from iterator");
-                    break MempoolIterationStopReason::IteratorExited;
-                }
-            }
-
-            // Reset for finding the next transaction to process
-            debug!(
-                "Mempool: reset: retry list has {} entries",
-                candidate_cache.len()
-            );
-            candidate_cache.reset();
+                total_considered += 1;
+
+                // Run `todo` on the transaction.
+                match todo(clarity_tx, &consider, self.cost_estimator.as_mut())? {
+                    Some(tx_event) => {
+                        match tx_event {
+                            TransactionEvent::Success(_) => {
+                                // Bump nonces in the cache for the executed transaction
+                                nonce_cache.set(
+                                    consider.tx.metadata.origin_address,
+                                    expected_origin_nonce + 1,
+                                    &mut nonce_conn,
+                                );
+                                if consider.tx.tx.auth.is_sponsored() {
+                                    nonce_cache.set(
+                                        consider.tx.metadata.sponsor_address,
+                                        expected_sponsor_nonce + 1,
+                                        &mut nonce_conn,
+                                    );
+                                }
+                                output_events.push(tx_event);
+                            }
+                            TransactionEvent::Skipped(_) => {
+                                // don't push `Skipped` events to the observer
+                            }
+                            _ => {
+                                output_events.push(tx_event);
+                            }
+                        }
+                    }
+                    None => {
+                        debug!("Mempool: early exit from iterator");
+                        break MempoolIterationStopReason::IteratorExited;
+                    }
+                }
+
+                if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate {
+                    // Reset for finding the next transaction to process
+                    debug!(
+                        "Mempool: reset: retry list has {} entries",
+                        candidate_cache.len()
+                    );
+                    candidate_cache.reset();
+                }
+            };
+
+            // If we've reached the end of the mempool, or if we've stopped
+            // iterating for some other reason, break out of the loop
+            if settings.strategy != MemPoolWalkStrategy::NextNonceWithHighestFeeRate
+                || stop_reason != MempoolIterationStopReason::NoMoreCandidates
+                || !state_changed
+            {
+                if stop_reason == MempoolIterationStopReason::NoMoreCandidates {
+                    debug!("Mempool: no more transactions to consider");
+                }
+                break stop_reason;
+            }
+
+            // Flush the nonce cache to the database before performing the next
+            // query.
+            nonce_cache.flush(&mut nonce_conn);
+
+            // Flush the considered-transaction IDs to the database before
+            // performing the next query.
+            flush_considered_txs(&mut nonce_conn, &mut considered_txs);
         };
 
         // drop these rusqlite statements and queries, since their existence as immutable borrows on the
         // connection prevents us from beginning a transaction below (which requires a mutable
         // borrow).
         drop(null_iterator);
-        drop(fee_iterator);
         drop(query_stmt_null);
+        drop(fee_iterator);
         drop(query_stmt_fee);
 
-        if !retry_store.is_empty() {
-            let tx = self.tx_begin()?;
-            for (address, nonce) in retry_store.into_iter() {
-                nonce_cache.update(address, nonce, &tx);
-            }
-            tx.commit()?;
-        }
+        // Write through the nonce cache to the database
+        nonce_cache.flush(&mut self.db);
 
-        debug!(
+        info!(
             "Mempool iteration finished";
             "considered_txs" => u128::from(total_considered),
-            "elapsed_ms" => start_time.elapsed().as_millis()
+            "elapsed_ms" => start_time.elapsed().as_millis(),
+            "stop_reason" => ?stop_reason
         );
         Ok((total_considered, stop_reason))
     }
@@ -2879,3 +2901,48 @@ impl MemPoolDB {
         Ok((ret, next_page, num_rows_visited))
     }
 }
+
+/// Flush the considered transaction IDs to the DB.
+/// Do not return until successful. After a successful flush, clear the vector.
+pub fn flush_considered_txs(conn: &mut DBConn, considered_txs: &mut Vec<Txid>) { + const MAX_BACKOFF: Duration = Duration::from_secs(30); + let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200)); + + loop { + // Pass a slice to the try function. + let result = try_flush_considered_txs(conn, considered_txs.as_slice()); + + match result { + Ok(_) => { + // On success, clear the vector so that it’s empty. + considered_txs.clear(); + return; + } + Err(e) => { + warn!("Considered txid flush failed: {e}. Retrying in {backoff:?}"); + thread::sleep(backoff); + if backoff < MAX_BACKOFF { + backoff = + backoff * 2 + Duration::from_millis(rand::thread_rng().gen_range(50..200)); + } + } + } + } +} + +/// Try to flush the considered transaction IDs to the DB. +pub fn try_flush_considered_txs( + conn: &mut DBConn, + considered_txs: &[Txid], +) -> Result<(), db_error> { + let sql = "INSERT OR IGNORE INTO considered_txs (txid) VALUES (?1)"; + + let db_tx = conn.transaction()?; + + for txid in considered_txs { + db_tx.execute(sql, params![txid])?; + } + + db_tx.commit()?; + Ok(()) +} diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 899f9d4a2f..5d6720d238 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -30,9 +30,12 @@ use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; +pub mod nonce_cache; #[cfg(test)] pub mod tests; +#[cfg(any(test, feature = "testing"))] +pub mod util; use std::cmp::Ordering; pub type StacksEpoch = GenericStacksEpoch<ExecutionCost>; diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs new file mode 100644 index 0000000000..e15ff36151 --- /dev/null +++ b/stackslib/src/core/nonce_cache.rs @@ -0,0 +1,327 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +use std::collections::HashMap; +use std::thread; +use std::time::Duration; + +use clarity::types::chainstate::StacksAddress; +use clarity::util::lru_cache::LruCache; +use clarity::vm::clarity::ClarityConnection; +use rand::Rng; +use rusqlite::params; + +use super::mempool::MemPoolTx; +use super::MemPoolDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::util_lib::db::{query_row, u64_to_sql, DBConn, Error as db_error}; + +/// Used to cache nonces in memory and in the mempool database. +/// 1. MARF - source of truth for nonces +/// 2. Nonce DB - table in mempool sqlite database +/// 3. HashMap - in-memory cache for nonces +/// The in-memory cache is restricted to a maximum size to avoid memory +/// exhaustion. When the cache is full, it should be flushed to the database +/// and cleared. It is recommended to do this in between batches of candidate +/// transactions from the mempool. +pub struct NonceCache { + /// In-memory LRU cache of nonces.
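+ /// Maps each sender address to its next expected nonce. Entries written via `set` are marked dirty and persisted to the `nonces` table on eviction or flush; entries loaded from the DB are inserted clean.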
+ cache: LruCache<StacksAddress, u64>, +} + +impl NonceCache { + pub fn new(max_size: usize) -> Self { + Self { + cache: LruCache::new(max_size), + } + } + + /// Get a nonce. + /// First, the RAM cache will be checked for this address. + /// If absent, then the `nonces` table will be queried for this address. + /// If absent, then the MARF will be queried for this address. + /// + /// Any nonce that was not already in the RAM cache is inserted into it; if that + /// insertion evicts a dirty entry, the evicted entry is flushed to the `nonces` table. + /// + /// Returns the nonce for `address`. + pub fn get<C>( + &mut self, + address: &StacksAddress, + clarity_tx: &mut C, + mempool_db: &mut DBConn, + ) -> u64 + where + C: ClarityConnection, + { + // Check in-memory cache + match self.cache.get(address) { + Some(nonce) => nonce, + None => { + // Check sqlite cache + let opt_nonce = match db_get_nonce(mempool_db, address) { + Ok(opt_nonce) => opt_nonce, + Err(e) => { + warn!("error retrieving nonce from mempool db: {}", e); + None + } + }; + match opt_nonce { + Some(nonce) => { + // Insert into in-memory cache, but it is not dirty, + // since we just got it from the database. + let evicted = self.cache.insert_clean(address.clone(), nonce); + if evicted.is_some() { + // If we evicted something, we need to flush the cache. + self.flush_with_evicted(mempool_db, evicted); + } + nonce + } + None => { + let nonce = + StacksChainState::get_nonce(clarity_tx, &address.clone().into()); + + self.set(address.clone(), nonce, mempool_db); + nonce + } + } + } + } + } + + /// Set the nonce for `address` to `value` in the in-memory cache. + /// If this causes an eviction, flush the in-memory cache to the DB. + pub fn set(&mut self, address: StacksAddress, value: u64, conn: &mut DBConn) { + let evicted = self.cache.insert(address.clone(), value); + if evicted.is_some() { + // If we evicted something, we need to flush the cache. + self.flush_with_evicted(conn, evicted); + } + } + + /// Flush the in-memory cache to the DB, including `evicted`. + /// Do not return until successful. + pub fn flush_with_evicted(&mut self, conn: &mut DBConn, evicted: Option<(StacksAddress, u64)>) { + const MAX_BACKOFF: Duration = Duration::from_secs(30); + let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200)); + + loop { + let result = self.try_flush_with_evicted(conn, evicted); + + match result { + Ok(_) => return, // Success: exit the loop + Err(e) => { + // Calculate a backoff duration + warn!("Nonce cache flush failed: {e}. Retrying in {backoff:?}"); + + // Sleep for the backoff duration + thread::sleep(backoff); + + if backoff < MAX_BACKOFF { + // Exponential backoff + backoff = backoff * 2 + + Duration::from_millis(rand::thread_rng().gen_range(50..200)); + } + } + } + } + } + + /// Try to flush the in-memory cache to the DB, including `evicted`. + pub fn try_flush_with_evicted( + &mut self, + conn: &mut DBConn, + evicted: Option<(StacksAddress, u64)>, + ) -> Result<(), db_error> { + // Flush the cache to the database + let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; + + let tx = conn.transaction()?; + + if let Some((addr, nonce)) = evicted { + tx.execute(sql, params![addr, nonce])?; + } + + self.cache.flush(|addr, nonce| { + tx.execute(sql, params![addr, nonce])?; + Ok::<(), db_error>(()) + })?; + + tx.commit()?; + + Ok(()) + } + + /// Flush the in-memory cache to the DB. + /// Do not return until successful.
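+ /// Equivalent to calling `flush_with_evicted` with no extra evicted entry.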
+ pub fn flush(&mut self, conn: &mut DBConn) { + self.flush_with_evicted(conn, None) + } +} + +fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<(), db_error> { + let addr_str = address.to_string(); + let nonce_i64 = u64_to_sql(nonce)?; + + let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; + conn.execute(sql, params![addr_str, nonce_i64])?; + Ok(()) +} + +fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result<Option<u64>, db_error> { + let addr_str = address.to_string(); + + let sql = "SELECT nonce FROM nonces WHERE address = ?"; + query_row(conn, sql, params![addr_str]) +} + +#[cfg(test)] +mod tests { + use clarity::consts::CHAIN_ID_TESTNET; + use clarity::types::chainstate::StacksBlockId; + use clarity::types::Address; + use clarity::vm::tests::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; + + use super::*; + use crate::chainstate::stacks::db::test::{chainstate_path, instantiate_chainstate}; + use crate::chainstate::stacks::index::ClarityMarfTrieId; + use crate::clarity_vm::clarity::ClarityInstance; + use crate::clarity_vm::database::marf::MarfedKV; + + #[test] + fn test_nonce_cache() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(2); + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); + let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + let addr3 = + StacksAddress::from_string("ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG").unwrap(); + + let conn = &mut mempool.db; + cache.set(addr1.clone(), 1, conn); + cache.set(addr2.clone(), 2, conn); + + let marf = MarfedKV::temporary(); + let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); + clarity_instance + .begin_test_genesis_block( + &StacksBlockId::sentinel(), + &StacksBlockId([0u8; 32]), + &TEST_HEADER_DB, + &TEST_BURN_STATE_DB, + ) + .commit_block(); + let mut clarity_conn = clarity_instance.begin_block( + &StacksBlockId([0 as u8; 32]), + &StacksBlockId([1 as u8; 32]), + &TEST_HEADER_DB, + &TEST_BURN_STATE_DB, + ); + + clarity_conn.as_transaction(|clarity_tx| { + assert_eq!(cache.get(&addr1, clarity_tx, conn), 1); + assert_eq!(cache.get(&addr2, clarity_tx, conn), 2); + // addr3 is not in the cache, so it should be fetched from the + // clarity instance (and get 0) + assert_eq!(cache.get(&addr3, clarity_tx, conn), 0); + }); + } + + #[test] + fn test_db_set_nonce() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let conn = &mut mempool.db; + let addr = StacksAddress::from_string("ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC").unwrap(); + db_set_nonce(&conn, &addr, 123).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 123); + } + + #[test] + fn test_nonce_cache_eviction() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(2); // Cache size of 2 + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); +
let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + let addr3 = + StacksAddress::from_string("ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG").unwrap(); + + let conn = &mut mempool.db; + + // Fill cache to capacity + cache.set(addr1.clone(), 1, conn); + cache.set(addr2.clone(), 2, conn); + + // This should cause addr1 to be evicted + cache.set(addr3.clone(), 3, conn); + + // Verify addr1 was written to DB during eviction + assert_eq!(db_get_nonce(&conn, &addr1).unwrap().unwrap(), 1); + } + + #[test] + fn test_nonce_cache_flush() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(3); + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); + let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + + let conn = &mut mempool.db; + + cache.set(addr1.clone(), 5, conn); + cache.set(addr2.clone(), 10, conn); + + // Explicitly flush cache + cache.flush(conn); + + // Verify both entries were written to DB + assert_eq!(db_get_nonce(&conn, &addr1).unwrap().unwrap(), 5); + assert_eq!(db_get_nonce(&conn, &addr2).unwrap().unwrap(), 10); + } + + #[test] + fn test_db_nonce_overwrite() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let conn = &mut mempool.db; + + let addr = StacksAddress::from_string("ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC").unwrap(); + + // Set initial nonce + db_set_nonce(&conn, &addr, 1).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 1); + + // Overwrite with new nonce + db_set_nonce(&conn, &addr, 2).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 2); + } +} diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 092b019b39..120acb478f 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -39,6 +39,7 @@ use stacks_common::util::secp256k1::{MessageSignature, *}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log, sleep_ms}; +use super::mempool::MemPoolWalkStrategy; use super::MemPoolDB; use crate::burnchains::{Address, Txid}; use crate::chainstate::burn::ConsensusHash; @@ -64,6 +65,7 @@ use crate::core::mempool::{ db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; +use crate::core::util::{insert_tx_in_mempool, make_stacks_transfer, to_addr}; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::net::Error as NetError; use crate::util_lib::bloom::test::setup_bloom_counter; @@ -332,7 +334,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); chainstate.with_read_only_clarity_tx( @@ -371,7 +373,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); // The mempool iterator no longer does any consideration of what block accepted @@ -412,7 +414,7 @@ fn mempool_walk_over_fork() { ); mempool - 
.reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); chainstate.with_read_only_clarity_tx( @@ -451,7 +453,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); // let's test replace-across-fork while we're here. @@ -673,7 +675,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { ); // Next with 0% - let _ = mempool.reset_nonce_cache(); + let _ = mempool.reset_mempool_caches(); mempool_settings.consider_no_estimate_tx_prob = 0; chainstate.with_read_only_clarity_tx( @@ -709,7 +711,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { ); // Then with 100% - let _ = mempool.reset_nonce_cache(); + let _ = mempool.reset_mempool_caches(); mempool_settings.consider_no_estimate_tx_prob = 100; chainstate.with_read_only_clarity_tx( @@ -2763,3 +2765,87 @@ fn test_filter_txs_by_type() { }, ); } + +#[test] +fn large_mempool() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut senders = (0..1024) + .map(|_| (StacksPrivateKey::random(), 0)) + .collect::<Vec<_>>(); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let b = make_block( + &mut chainstate, + ConsensusHash([0x2; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 2, + 2, + ); + let block_height = 10; + + println!("Adding transactions to mempool"); + let mempool_tx = mempool.tx_begin().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = to_addr(sender_sk); + let fee = thread_rng().gen_range(180..2000); + let transfer_tx = + make_stacks_transfer(sender_sk, *nonce, fee, 0x80000000, &recipient, 1); + insert_tx_in_mempool( + &mempool_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, + &ConsensusHash([0x2; 20]), + &FIRST_STACKS_BLOCK_HASH, + block_height, + ); + *nonce += 1; + } + } + mempool_tx.commit().unwrap(); + + let mut mempool_settings = MemPoolWalkSettings::default(); + mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + let mut tx_events = Vec::new(); + + println!("Iterating mempool"); + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b.0, &b.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::ZERO, + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap(); + // It should be able to iterate through at least 10000 transactions in 5s + assert!(count_txs > 10000); + }, + ); +} diff --git a/stackslib/src/core/util.rs b/stackslib/src/core/util.rs new file mode 100644 index 0000000000..d49a7a5922 --- /dev/null +++ b/stackslib/src/core/util.rs @@ -0,0 +1,520 @@ +use std::io::Cursor; + +use chrono::Utc; +use clarity::codec::StacksMessageCodec; +use clarity::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, +}; +use clarity::vm::tests::BurnStateDB; +use
clarity::vm::types::PrincipalData; +use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; +use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::{ + CoinbasePayload, StacksBlock, StacksMicroblock, StacksMicroblockHeader, StacksTransaction, + StacksTransactionSigner, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionSpendingCondition, TransactionVersion, +}; +use crate::util_lib::strings::StacksString; + +#[allow(clippy::too_many_arguments)] +pub fn sign_sponsored_sig_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + payer: &StacksPrivateKey, + sender_nonce: u64, + payer_nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + sign_tx_anchor_mode_version( + payload, + sender, + Some(payer), + sender_nonce, + Some(payer_nonce), + tx_fee, + chain_id, + anchor_mode, + version, + ) +} + +pub fn sign_standard_single_sig_tx( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode( + payload, + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OnChainOnly, + ) +} + +pub fn sign_standard_single_sig_tx_anchor_mode( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode_version( + payload, + sender, + nonce, + tx_fee, + chain_id, + anchor_mode, + TransactionVersion::Testnet, + ) +} + +pub fn sign_standard_single_sig_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + sign_tx_anchor_mode_version( + payload, + sender, + None, + nonce, + None, + tx_fee, + chain_id, + anchor_mode, + version, + ) +} + +#[allow(clippy::too_many_arguments)] +pub fn sign_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + payer: Option<&StacksPrivateKey>, + sender_nonce: u64, + payer_nonce: Option<u64>, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + let mut sender_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) + .expect("Failed to create p2pkh spending condition from public key."); + sender_spending_condition.set_nonce(sender_nonce); + + let auth = match (payer, payer_nonce) { + (Some(payer), Some(payer_nonce)) => { + let mut payer_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh( + StacksPublicKey::from_private(payer), + ) + .expect("Failed to create p2pkh spending condition from public key."); + payer_spending_condition.set_nonce(payer_nonce); + payer_spending_condition.set_tx_fee(tx_fee); + TransactionAuth::Sponsored(sender_spending_condition, payer_spending_condition) + } + _ => { + sender_spending_condition.set_tx_fee(tx_fee); + TransactionAuth::Standard(sender_spending_condition) + } + }; + let mut unsigned_tx
= StacksTransaction::new(version, auth, payload); + unsigned_tx.anchor_mode = anchor_mode; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + unsigned_tx.chain_id = chain_id; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + tx_signer.sign_origin(sender).unwrap(); + if let (Some(payer), Some(_)) = (payer, payer_nonce) { + tx_signer.sign_sponsor(payer).unwrap(); + } + + tx_signer.get_tx().unwrap() +} + +#[allow(clippy::too_many_arguments)] +pub fn serialize_sign_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + payer: Option<&StacksPrivateKey>, + sender_nonce: u64, + payer_nonce: Option<u64>, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> Vec<u8> { + let tx = sign_tx_anchor_mode_version( + payload, + sender, + payer, + sender_nonce, + payer_nonce, + tx_fee, + chain_id, + anchor_mode, + version, + ); + + let mut buf = vec![]; + tx.consensus_serialize(&mut buf).unwrap(); + buf +} + +pub fn make_contract_publish_versioned( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, + version: Option<ClarityVersion>, +) -> Vec<u8> { + let name = ContractName::from(contract_name); + let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); + + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); + + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_contract_publish( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, +) -> Vec<u8> { + make_contract_publish_versioned( + sender, + nonce, + tx_fee, + chain_id, + contract_name, + contract_content, + None, + ) +} + +pub fn make_contract_publish_microblock_only_versioned( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, + version: Option<ClarityVersion>, +) -> Vec<u8> { + let name = ContractName::from(contract_name); + let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); + + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); + + let tx = sign_standard_single_sig_tx_anchor_mode( + payload, + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OffChainOnly, + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_contract_publish_microblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_name: &str, + contract_content: &str, +) -> Vec<u8> { + make_contract_publish_microblock_only_versioned( + sender, + nonce, + tx_fee, + chain_id, + contract_name, + contract_content, + None, + ) +} + +pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { + StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(sk)], + ) + .unwrap() +} + +pub fn make_stacks_transfer( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + recipient: &PrincipalData, + amount: u64, +) -> Vec<u8> { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee,
chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +#[allow(clippy::too_many_arguments)] +pub fn make_sponsored_stacks_transfer_on_testnet( + sender: &StacksPrivateKey, + payer: &StacksPrivateKey, + sender_nonce: u64, + payer_nonce: u64, + tx_fee: u64, + chain_id: u32, + recipient: &PrincipalData, + amount: u64, +) -> Vec<u8> { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + let tx = sign_sponsored_sig_tx_anchor_mode_version( + payload, + sender, + payer, + sender_nonce, + payer_nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OnChainOnly, + TransactionVersion::Testnet, + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_stacks_transfer_mblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + recipient: &PrincipalData, + amount: u64, +) -> Vec<u8> { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + let tx = sign_standard_single_sig_tx_anchor_mode( + payload, + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OffChainOnly, + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_poison( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + header_1: StacksMicroblockHeader, + header_2: StacksMicroblockHeader, +) -> Vec<u8> { + let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> { + let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +#[allow(clippy::too_many_arguments)] +pub fn make_contract_call( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_addr: &StacksAddress, + contract_name: &str, + function_name: &str, + function_args: &[Value], +) -> Vec<u8> { + let contract_name = ContractName::from(contract_name); + let function_name = ClarityName::from(function_name); + + let payload = TransactionContractCall { + address: *contract_addr, + contract_name, + function_name, + function_args: function_args.to_vec(), + }; + + let tx = sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +#[allow(clippy::too_many_arguments)] +pub fn make_contract_call_mblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + contract_addr: &StacksAddress, + contract_name: &str, + function_name: &str, + function_args: &[Value], +) -> Vec<u8> { + let contract_name = ContractName::from(contract_name); + let function_name = ClarityName::from(function_name); + + let payload = TransactionContractCall { + address: *contract_addr, + contract_name, + function_name, + function_args: function_args.to_vec(), + }; + + let tx = sign_standard_single_sig_tx_anchor_mode( + payload.into(), + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OffChainOnly, + ); + let mut tx_bytes = vec![];
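+ // Consensus-serialize the signed transaction into its wire-format bytes.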
tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes +} + +pub fn make_microblock( + privk: &StacksPrivateKey, + chainstate: &mut StacksChainState, + burn_dbconn: &dyn BurnStateDB, + consensus_hash: ConsensusHash, + block: StacksBlock, + txs: Vec<StacksTransaction>, +) -> StacksMicroblock { + let mut block_bytes = vec![]; + block.consensus_serialize(&mut block_bytes).unwrap(); + + let mut microblock_builder = StacksMicroblockBuilder::new( + block.block_hash(), + consensus_hash, + chainstate, + burn_dbconn, + BlockBuilderSettings::max_value(), + ) + .unwrap(); + let mempool_txs: Vec<_> = txs + .into_iter() + .map(|tx| { + // TODO: better fee estimation + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + (tx, tx_bytes.len() as u64) + }) + .collect(); + + // NOTE: we intentionally do not check the block's microblock pubkey hash against the private + // key, because we may need to test that microblocks get rejected due to bad signatures. + microblock_builder + .mine_next_microblock_from_txs(mempool_txs, privk) + .unwrap() +} + +pub fn insert_tx_in_mempool( + db_tx: &rusqlite::Transaction, + tx_hex: Vec<u8>, + origin_addr: &StacksAddress, + origin_nonce: u64, + fee: u64, + consensus_hash: &ConsensusHash, + block_header_hash: &BlockHeaderHash, + height: u64, +) { + let sql = "INSERT OR REPLACE INTO mempool ( + txid, + origin_address, + origin_nonce, + sponsor_address, + sponsor_nonce, + tx_fee, + length, + consensus_hash, + block_header_hash, + height, + accept_time, + tx, + fee_rate) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)"; + + let origin_addr_str = origin_addr.to_string(); + let length = tx_hex.len() as u64; + let fee_rate = fee / length * 30; + + let txid = { + let mut cursor = Cursor::new(&tx_hex); + StacksTransaction::consensus_deserialize(&mut cursor) + .expect("Failed to deserialize transaction") + .txid() + }; + let args = rusqlite::params![ + txid, + origin_addr_str, + origin_nonce, + origin_addr_str, + origin_nonce, + fee, + length, + consensus_hash, + block_header_hash, + height, + Utc::now().timestamp(), + tx_hex, + fee_rate + ]; + db_tx + .execute(sql, args) + .expect("Failed to insert transaction into mempool"); +} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 616ea8f81f..7339572720 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -4929,14 +4929,4 @@ pub mod test { acct } } - - pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { - StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(sk)], - ) - .unwrap() - } } diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 987e4f3bac..c71d46cb13 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -43,10 +43,11 @@ use crate::chainstate::stacks::{ TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; +use crate::core::util::to_addr; use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *}; use crate::net::inv::nakamoto::NakamotoTenureInv; -use crate::net::test::{dns_thread_start, to_addr, TestEventObserver}; +use crate::net::test::{dns_thread_start, TestEventObserver}; use crate::net::tests::inv::nakamoto::{ make_nakamoto_peer_from_invs, make_nakamoto_peers_from_invs_ext, peer_get_nakamoto_invs, }; diff --git
a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 625cb7cd01..c8248ef452 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -43,10 +43,11 @@ use crate::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; +use crate::core::util::to_addr; use crate::core::StacksEpochExtension; use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::comms::NeighborComms; -use crate::net::test::{to_addr, TestEventObserver, TestPeer}; +use crate::net::test::{TestEventObserver, TestPeer}; use crate::net::tests::{NakamotoBootPlan, NakamotoBootStep, NakamotoBootTenure}; use crate::net::{ Error as NetError, GetNakamotoInvData, HandshakeData, NakamotoInvData, NeighborAddress, diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 9576ae7e54..81dc0cd43c 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -32,6 +32,7 @@ use crate::burnchains::*; use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::stacks::test::*; use crate::chainstate::stacks::*; +use crate::core::util::to_addr; use crate::core::StacksEpochExtension; use crate::net::atlas::*; use crate::net::codec::*; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7155cf5966..eeb7650f31 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -224,6 +224,8 @@ pub struct BlockMinerThread { burn_tip_at_start: ConsensusHash, /// flag to indicate an abort driven from the relayer abort_flag: Arc<AtomicBool>, + /// Should the nonce cache be reset before mining the next block? + reset_nonce_cache: bool, } impl BlockMinerThread { @@ -257,6 +259,7 @@ impl BlockMinerThread { abort_flag: Arc::new(AtomicBool::new(false)), tenure_cost: ExecutionCost::ZERO, tenure_budget: ExecutionCost::ZERO, + reset_nonce_cache: true, } } @@ -506,6 +509,14 @@ impl BlockMinerThread { } let new_block = loop { + if self.reset_nonce_cache { + let mut mem_pool = self + .config + .connect_mempool_db() + .expect("Database failure opening mempool"); + mem_pool.reset_mempool_caches()?; + } + // If we're mock mining, we may not have processed the block that the // actual tenure winner committed to yet. So, before attempting to // mock mine, check if the parent is processed. @@ -550,6 +561,7 @@ impl BlockMinerThread { } info!("Miner interrupted while mining, will try again"); + // sleep, and try again. if the miner was interrupted because the burnchain // view changed, the next `mine_block()` invocation will error thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -557,6 +569,7 @@ impl BlockMinerThread { } Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { debug!("Miner did not find any transactions to mine"); + self.reset_nonce_cache = false; break None; } Err(e) => { @@ -1253,6 +1266,11 @@ impl BlockMinerThread { return Err(ChainstateError::MinerAborted.into()); } + // If we attempt to build a block, we should reset the nonce cache. + // In the special case where no transactions are found, this flag will + // be reset to false.
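+ // Nonces bumped during a failed or abandoned attempt would otherwise go stale and + // could cause the next attempt to skip transactions that were never actually mined.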
+ self.reset_nonce_cache = true; + // build the block itself let mut block_metadata = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 7462acd963..0dbb548461 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -14,6 +14,10 @@ use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; use stacks::config::{EventKeyType, InitialBalance}; +use stacks::core::util::{ + make_contract_call, make_contract_call_mblock_only, make_contract_publish, + make_contract_publish_microblock_only, to_addr, +}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -25,11 +29,7 @@ use stacks_common::util::sleep_ms; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; -use crate::tests::{ - make_contract_call, make_contract_call_mblock_only, make_contract_publish, - make_contract_publish_microblock_only, run_until_burnchain_height, select_transactions_where, - to_addr, -}; +use crate::tests::{run_until_burnchain_height, select_transactions_where}; use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain}; #[test] diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index b287d2dec4..3d85c0e909 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -3,7 +3,7 @@ use std::{env, thread}; use ::core::str; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use stacks::burnchains::bitcoin::address::{ BitcoinAddress, LegacyBitcoinAddressType, SegwitBitcoinAddress, }; @@ -25,6 +25,7 @@ use stacks::chainstate::stacks::miner::{ use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{Config, InitialBalance}; +use stacks::core::util::make_contract_call; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3b3f8c1908..f1ef3c4dc4 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -2,13 +2,14 @@ use std::collections::HashMap; use std::{env, thread}; use clarity::vm::types::PrincipalData; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use stacks::core::util::{make_contract_call, make_stacks_transfer}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 057669547a..1c6c19e970 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ 
b/testnet/stacks-node/src/tests/epoch_23.rs @@ -17,8 +17,10 @@ use std::collections::HashMap; use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use clarity::vm::Value; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::config::InitialBalance; +use stacks::core::util::make_contract_call; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index ffe9572045..fc1fc1a64e 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -27,6 +27,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::InitialBalance; +use stacks::core::util::{make_contract_call, to_addr}; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; @@ -42,7 +43,6 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, }; -use crate::tests::{make_contract_call, to_addr}; use crate::{neon, BitcoinRegtestController, BurnchainController}; #[cfg(test)] diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 3864d9c350..1a1ef463f1 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -18,6 +18,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::config::InitialBalance; +use stacks::core::util::{make_stacks_transfer_mblock_only, to_addr}; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; @@ -28,7 +29,6 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, }; -use crate::tests::{make_stacks_transfer_mblock_only, to_addr}; use crate::{neon, BitcoinRegtestController, BurnchainController}; #[test] diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index a67d8ae2c8..dbfd48307e 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -26,6 +26,10 @@ use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; +use stacks::core::util::{ + make_contract_call, make_contract_publish, make_sponsored_stacks_transfer_on_testnet, + make_stacks_transfer, to_addr, +}; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -37,12 +41,8 @@ use stacks::net::api::getistraitimplemented::GetIsTraitImplementedResponse; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum}; -use super::{ - make_contract_call, make_contract_publish, make_stacks_transfer, to_addr, ADDR_4, SK_1, SK_2, - SK_3, -}; +use super::{ADDR_4, SK_1, SK_2, 
SK_3}; use crate::helium::RunLoop; -use crate::tests::make_sponsored_stacks_transfer_on_testnet; const OTHER_CONTRACT: &str = " (define-data-var x uint u0) diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index fa83181529..b60a004116 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -15,6 +15,10 @@ use stacks::chainstate::stacks::{ }; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; +use stacks::core::util::{ + make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, + sign_standard_single_sig_tx_anchor_mode_version, to_addr, +}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; @@ -24,10 +28,7 @@ use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks_common::util::hash::*; use stacks_common::util::secp256k1::*; -use super::{ - make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, - serialize_sign_standard_single_sig_tx_anchor_mode_version, to_addr, SK_1, SK_2, -}; +use super::{SK_1, SK_2}; use crate::helium::RunLoop; use crate::Keychain; @@ -506,7 +507,7 @@ fn mempool_setup_chainstate() { 1000, TokenTransferMemo([0; 34]), ); - let tx_bytes = serialize_sign_standard_single_sig_tx_anchor_mode_version( + let tx = sign_standard_single_sig_tx_anchor_mode_version( payload, &contract_sk, 5, @@ -515,8 +516,8 @@ fn mempool_setup_chainstate() { TransactionAnchorMode::OnChainOnly, TransactionVersion::Mainnet, ); - let tx = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); let e = chain_state .will_admit_mempool_tx( &NULL_BURN_STATE_DB, diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a4546d231b..702c7244da 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -18,29 +18,21 @@ use std::sync::atomic::AtomicU64; use std::sync::{Arc, Mutex}; use clarity::vm::costs::ExecutionCost; -use clarity::vm::database::BurnStateDB; use clarity::vm::events::STXEventType; -use clarity::vm::types::PrincipalData; -use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; use neon_integrations::test_observer::EVENT_OBSERVER_PORT; use rand::Rng; use stacks::chainstate::burn::ConsensusHash; -use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StacksTransactionEvent; -use stacks::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; use stacks::chainstate::stacks::{ - CoinbasePayload, StacksBlock, StacksMicroblock, StacksMicroblockHeader, StacksPrivateKey, - StacksPublicKey, StacksTransaction, StacksTransactionSigner, TokenTransferMemo, - TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, - TransactionPostConditionMode, TransactionSmartContract, TransactionSpendingCondition, - TransactionVersion, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + StacksPrivateKey, StacksPublicKey, StacksTransaction, TransactionPayload, }; +#[cfg(any(test, feature = "testing"))] +use stacks::core::util::{make_contract_publish, to_addr}; use stacks::core::{StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_TESTNET}; -use stacks::util_lib::strings::StacksString; use 
stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; +use stacks_common::types::chainstate::BlockHeaderHash; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{hex_bytes, to_hex}; @@ -133,222 +125,6 @@ pub fn insert_new_port(port: u16) -> bool { ports.insert(port) } -#[allow(clippy::too_many_arguments)] -pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( - payload: TransactionPayload, - sender: &StacksPrivateKey, - payer: &StacksPrivateKey, - sender_nonce: u64, - payer_nonce: u64, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, - version: TransactionVersion, -) -> Vec<u8> { - serialize_sign_tx_anchor_mode_version( - payload, - sender, - Some(payer), - sender_nonce, - Some(payer_nonce), - tx_fee, - chain_id, - anchor_mode, - version, - ) -} - -pub fn serialize_sign_standard_single_sig_tx( - payload: TransactionPayload, - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, -) -> Vec<u8> { - serialize_sign_standard_single_sig_tx_anchor_mode( - payload, - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OnChainOnly, - ) -} - -pub fn serialize_sign_standard_single_sig_tx_anchor_mode( - payload: TransactionPayload, - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, -) -> Vec<u8> { - serialize_sign_standard_single_sig_tx_anchor_mode_version( - payload, - sender, - nonce, - tx_fee, - chain_id, - anchor_mode, - TransactionVersion::Testnet, - ) -} - -pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( - payload: TransactionPayload, - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, - version: TransactionVersion, -) -> Vec<u8> { - serialize_sign_tx_anchor_mode_version( - payload, - sender, - None, - nonce, - None, - tx_fee, - chain_id, - anchor_mode, - version, - ) -} - -#[allow(clippy::too_many_arguments)] -pub fn serialize_sign_tx_anchor_mode_version( - payload: TransactionPayload, - sender: &StacksPrivateKey, - payer: Option<&StacksPrivateKey>, - sender_nonce: u64, - payer_nonce: Option<u64>, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, - version: TransactionVersion, -) -> Vec<u8> { - let mut sender_spending_condition = - TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) - .expect("Failed to create p2pkh spending condition from public key."); - sender_spending_condition.set_nonce(sender_nonce); - - let auth = match (payer, payer_nonce) { - (Some(payer), Some(payer_nonce)) => { - let mut payer_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh( - StacksPublicKey::from_private(payer), - ) - .expect("Failed to create p2pkh spending condition from public key."); - payer_spending_condition.set_nonce(payer_nonce); - payer_spending_condition.set_tx_fee(tx_fee); - TransactionAuth::Sponsored(sender_spending_condition, payer_spending_condition) - } - _ => { - sender_spending_condition.set_tx_fee(tx_fee); - TransactionAuth::Standard(sender_spending_condition) - } - }; - let mut unsigned_tx = StacksTransaction::new(version, auth, payload); - unsigned_tx.anchor_mode = anchor_mode; - unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; - unsigned_tx.chain_id = chain_id; - - let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); - tx_signer.sign_origin(sender).unwrap(); - if let (Some(payer),
Some(_)) = (payer, payer_nonce) { - tx_signer.sign_sponsor(payer).unwrap(); - } - - let mut buf = vec![]; - tx_signer - .get_tx() - .unwrap() - .consensus_serialize(&mut buf) - .unwrap(); - buf -} - -pub fn make_contract_publish_versioned( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, - version: Option<ClarityVersion>, -) -> Vec<u8> { - let name = ContractName::from(contract_name); - let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - - let payload = - TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) -} - -pub fn make_contract_publish( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, -) -> Vec<u8> { - make_contract_publish_versioned( - sender, - nonce, - tx_fee, - chain_id, - contract_name, - contract_content, - None, - ) -} - -pub fn make_contract_publish_microblock_only_versioned( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, - version: Option<ClarityVersion>, -) -> Vec<u8> { - let name = ContractName::from(contract_name); - let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - - let payload = - TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - - serialize_sign_standard_single_sig_tx_anchor_mode( - payload, - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OffChainOnly, - ) -} - -pub fn make_contract_publish_microblock_only( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, -) -> Vec<u8> { - make_contract_publish_microblock_only_versioned( - sender, - nonce, - tx_fee, - chain_id, - contract_name, - contract_content, - None, - ) -} - pub fn new_test_conf() -> Config { // secretKey: "b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01", // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", @@ -409,183 +185,6 @@ pub fn set_random_binds(config: &mut Config) { config.node.p2p_address = format!("{localhost}:{p2p_port}"); } -pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { - StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(sk)], - ) - .unwrap() -} - -pub fn make_stacks_transfer( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - recipient: &PrincipalData, - amount: u64, -) -> Vec<u8> { - let payload = - TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) -} - -#[allow(clippy::too_many_arguments)] -pub fn make_sponsored_stacks_transfer_on_testnet( - sender: &StacksPrivateKey, - payer: &StacksPrivateKey, - sender_nonce: u64, - payer_nonce: u64, - tx_fee: u64, - chain_id: u32, - recipient: &PrincipalData, - amount: u64, -) -> Vec<u8> { - let payload = - TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_sponsored_sig_tx_anchor_mode_version( - payload, - sender, - payer, - sender_nonce, - payer_nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OnChainOnly, - TransactionVersion::Testnet, - ) -} - -pub fn make_stacks_transfer_mblock_only( - sender:
&StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - recipient: &PrincipalData, - amount: u64, -) -> Vec<u8> { - let payload = - TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx_anchor_mode( - payload, - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OffChainOnly, - ) -} - -pub fn make_poison( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - header_1: StacksMicroblockHeader, - header_2: StacksMicroblockHeader, -) -> Vec<u8> { - let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) -} - -pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> { - let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) -} - -#[allow(clippy::too_many_arguments)] -pub fn make_contract_call( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_addr: &StacksAddress, - contract_name: &str, - function_name: &str, - function_args: &[Value], -) -> Vec<u8> { - let contract_name = ContractName::from(contract_name); - let function_name = ClarityName::from(function_name); - - let payload = TransactionContractCall { - address: *contract_addr, - contract_name, - function_name, - function_args: function_args.to_vec(), - }; - - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) -} - -#[allow(clippy::too_many_arguments)] -pub fn make_contract_call_mblock_only( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_addr: &StacksAddress, - contract_name: &str, - function_name: &str, - function_args: &[Value], -) -> Vec<u8> { - let contract_name = ContractName::from(contract_name); - let function_name = ClarityName::from(function_name); - - let payload = TransactionContractCall { - address: *contract_addr, - contract_name, - function_name, - function_args: function_args.to_vec(), - }; - - serialize_sign_standard_single_sig_tx_anchor_mode( - payload.into(), - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OffChainOnly, - ) -} - -fn make_microblock( - privk: &StacksPrivateKey, - chainstate: &mut StacksChainState, - burn_dbconn: &dyn BurnStateDB, - consensus_hash: ConsensusHash, - block: StacksBlock, - txs: Vec<StacksTransaction>, -) -> StacksMicroblock { - let mut block_bytes = vec![]; - block.consensus_serialize(&mut block_bytes).unwrap(); - - let mut microblock_builder = StacksMicroblockBuilder::new( - block.block_hash(), - consensus_hash, - chainstate, - burn_dbconn, - BlockBuilderSettings::max_value(), - ) - .unwrap(); - let mempool_txs: Vec<_> = txs - .into_iter() - .map(|tx| { - // TODO: better fee estimation - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - (tx, tx_bytes.len() as u64) - }) - .collect(); - - // NOTE: we intentionally do not check the block's microblock pubkey hash against the private - // key, because we may need to test that microblocks get rejected due to bad signatures. - microblock_builder - .mine_next_microblock_from_txs(mempool_txs, privk) - .unwrap() -} - /// Deserializes the `StacksTransaction` objects from `blocks` and returns all those that /// match `test_fn`.
pub fn select_transactions_where( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b6b4bb7645..c249234aed 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -31,7 +31,8 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::{RejectReason, SignerMessage as SignerMessageV0}; use libsigner::{SignerSession, StackerDBSession}; -use rusqlite::OptionalExtension; +use rand::{thread_rng, Rng}; +use rusqlite::{Connection, OptionalExtension}; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -61,7 +62,10 @@ use stacks::chainstate::stacks::{ TransactionVersion, MAX_BLOCK_LEN, }; use stacks::config::{EventKeyType, InitialBalance}; -use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; +use stacks::core::mempool::{MemPoolWalkStrategy, MAXIMUM_MEMPOOL_TX_CHAINING}; +use stacks::core::util::{ + insert_tx_in_mempool, make_contract_call, make_contract_publish_versioned, make_stacks_transfer, +}; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -113,10 +117,7 @@ use crate::tests::neon_integrations::{ run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, wait_for_runloop, }; use crate::tests::signer::SignerTest; -use crate::tests::{ - gen_random_port, get_chain_info, make_contract_call, make_contract_publish, - make_contract_publish_versioned, make_stacks_transfer, to_addr, -}; +use crate::tests::{gen_random_port, get_chain_info, make_contract_publish, to_addr}; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; @@ -887,7 +888,7 @@ pub fn boot_to_epoch_3( let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -950,7 +951,7 @@ pub fn boot_to_epoch_3( let signer_index = get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) .unwrap(); - let voting_tx = tests::make_contract_call( + let voting_tx = make_contract_call( signer_sk, 0, 300, @@ -1049,7 +1050,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -1112,7 +1113,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_index = get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) .unwrap(); - let voting_tx = tests::make_contract_call( + let voting_tx = make_contract_call( signer_sk, 0, 300, @@ -1287,7 +1288,7 @@ pub fn setup_epoch_3_reward_set( .to_rsv(); let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -2731,7 +2732,7 @@ fn correct_burn_outs() { .unwrap() .to_rsv(); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( account.0, account.2.nonce, 1000, @@ -4675,7 +4676,7 @@ fn burn_ops_integration_test() { let signer_key_arg_1: StacksPublicKeyBuffer = signer_pk_1.to_bytes_compressed().as_slice().into(); - let set_signer_key_auth_tx = 
tests::make_contract_call( + let set_signer_key_auth_tx = make_contract_call( &signer_sk_1, 1, 500, @@ -6280,7 +6281,7 @@ fn clarity_burn_state() { // Pause mining to prevent the stacks block from being mined before the tenure change is processed TEST_MINE_STALL.set(true); // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) - let call_tx = tests::make_contract_call( + let call_tx = make_contract_call( &sender_sk, sender_nonce, tx_fee, @@ -6369,7 +6370,7 @@ fn clarity_burn_state() { result.expect_result_ok().expect("Read-only call failed"); // Submit a tx to trigger the next block - let call_tx = tests::make_contract_call( + let call_tx = make_contract_call( &sender_sk, sender_nonce, tx_fee, @@ -11379,3 +11380,646 @@ fn rbf_on_config_change() { run_loop_thread.join().unwrap(); } + +/// This function intends to check the timing of the mempool iteration when there +/// are a large number of transactions in the mempool. It will boot to epoch 3, +/// fan out some STX transfers to a large number of accounts, wait for these to +/// all be mined, and then pause block mining, and submit a large number of +/// transactions to the mempool. It will then unpause block mining and check +/// how long it takes for the miner to mine the first block, and how long it +/// takes to empty the mempool. Several tests below call this function, testing +/// different strategies and fees. +fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.mempool_walk_strategy = strategy; + + let sender_signer_sk = Secp256k1PrivateKey::random(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + let transfer_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // Start with 10 accounts with initial balances. + let initial_sender_sks = (0..10) + .map(|_| StacksPrivateKey::random()) + .collect::>(); + let initial_sender_addrs = initial_sender_sks + .iter() + .map(|sk| tests::to_addr(sk)) + .collect::>(); + + // These 10 accounts will send to 25 accounts each, then those 260 accounts + // will send to 25 accounts each, for a total of 6760 accounts. + // At the end of the funding round, we want to have 6760 accounts with + // enough balance to send 1 uSTX 25 times. + // With a fee of 180 to 2000 uSTX per send, we need each account to have + // 2001 * 25 = 50_025 uSTX. + // The 260 accounts in the middle will need to have enough to send that + // amount to 25 other accounts, plus the fee, and then enough to send the + // transfers themselves as well: + // (50025 + 180) * 25 + 50025 = 1_305_150 uSTX. + // The 10 initial accounts will need to have enough to send that amount to + // 25 other accounts, plus enough to send the transfers themselves as well: + // (1305150 + 180) * 25 + 1305150 = 33_938_400 uSTX. 
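The fan-out arithmetic in the comment above has to cover the worst-case fee at every level, so it is worth double-checking. A standalone sanity check of those numbers, with every constant taken from the comment (1 uSTX sends, 25 sends per account, worst-case fee of 2000 uSTX, funding fee of 180 uSTX):

```rust
// Sanity check of the funding fan-out math used by large_mempool_base.
fn main() {
    let worst_case_send = 2000u64 + 1; // worst-case fee plus the 1 uSTX sent
    let leaf = worst_case_send * 25; // each leaf account makes 25 sends
    let middle = (leaf + 180) * 25 + leaf; // fund 25 leaves, keep own budget
    let initial = (middle + 180) * 25 + middle; // fund 25 middles, keep own budget
    assert_eq!(leaf, 50_025);
    assert_eq!(middle, 1_305_150);
    assert_eq!(initial, 33_938_400);
}
```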
+ let initial_balance = 33_938_400; + for addr in initial_sender_addrs.iter() { + naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance); + } + // This will hold tuples for all of our senders, with the sender pk and + // the nonce + let mut senders = initial_sender_sks + .iter() + .map(|sk| (sk, 0)) + .collect::>(); + + test_observer::spawn(); + test_observer::register(&mut naka_conf, &[EventKeyType::MinedBlocks]); + + let mempool_db_path = format!( + "{}/nakamoto-neon/chainstate/mempool.sqlite", + naka_conf.node.working_dir + ); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, .. + } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, &counters); + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. 
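Submitting this many transactions through the node's RPC endpoint would dominate the test's runtime, so the code below writes rows into the mempool database directly and, crucially, wraps each round of inserts in a single sqlite transaction so the write cost is paid once per commit rather than once per row. A minimal sketch of that batching pattern, using only rusqlite and a stand-in table (the test itself goes through the real `insert_tx_in_mempool` helper against the actual mempool schema):

```rust
use rusqlite::{params, Connection};

// Illustrative only: batch many inserts inside one sqlite transaction.
fn bulk_insert(path: &str, rows: &[(String, u64)]) -> rusqlite::Result<()> {
    let mut conn = Connection::open(path)?;
    let db_tx = conn.transaction()?;
    db_tx.execute(
        "CREATE TABLE IF NOT EXISTS demo (address TEXT NOT NULL, nonce INTEGER NOT NULL)",
        [],
    )?;
    for (address, nonce) in rows {
        db_tx.execute(
            "INSERT INTO demo (address, nonce) VALUES (?1, ?2)",
            params![address, nonce],
        )?;
    }
    // Nothing is durable until here; a single commit amortizes the fsync.
    db_tx.commit()
}
```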
+ let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 1_305_150, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 50_025, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let fee = set_fee(); + assert!(fee >= 180 && fee <= 2000); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + fee, + naka_conf.burnchain.chain_id, + &recipient, + 1, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, 
+ &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + + info!("Sending transfers took {:?}", timer.elapsed()); + + info!("Mining transfers"); + let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(30, || { + let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len(); + Ok(proposed_blocks > proposed_blocks_before) + }) + .expect("Timed out waiting for first block to be mined"); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let last_block = blocks.last().unwrap(); + info!( + "First block contains {} transactions", + last_block.tx_events.len() + ); + if strategy == MemPoolWalkStrategy::NextNonceWithHighestFeeRate { + assert!(last_block.tx_events.len() > 5000); + } + + // Wait for the transfers to all be mined + wait_for(7200, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for transfers to be mined"); + + info!("Mining transfers took {:?}", timer.elapsed()); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +fn large_mempool_original_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || 180); +} + +#[test] +#[ignore] +fn large_mempool_original_random_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || { + thread_rng().gen_range(180..2000) + }); +} + +#[test] +#[ignore] +fn large_mempool_next_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || 180); +} + +#[test] +#[ignore] +fn large_mempool_next_random_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || { + thread_rng().gen_range(180..2000) + }); +} + +#[test] +#[ignore] +/// This test intends to check the timing of the mempool iteration when there +/// are a large number of transactions in the mempool. It will boot to epoch 3, +/// fan out some STX transfers to a large number of accounts, wait for these to +/// all be mined, and then pause block mining, and submit a large number of +/// transactions to the mempool from those accounts with random fees between +/// the minimum allowed fee of 180 uSTX and 2000 uSTX. It will then unpause +/// block mining and check how long it takes for the miner to mine the first +/// block, and how long it takes to empty the mempool. 
+fn larger_mempool() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + + let sender_signer_sk = Secp256k1PrivateKey::random(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + let transfer_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // Start with 10 accounts with initial balances. + let initial_sender_sks = (0..10) + .map(|_| StacksPrivateKey::random()) + .collect::<Vec<_>>(); + let initial_sender_addrs = initial_sender_sks + .iter() + .map(|sk| tests::to_addr(sk)) + .collect::<Vec<_>>(); + + // These 10 accounts will send to 25 accounts each, then those 260 accounts + // will send to 25 accounts each, for a total of 6760 accounts. + // At the end of the funding round, we want to have 6760 accounts with + // enough balance to send 1 uSTX 25 times in each of 10 rounds of sends. + // Budgeting the worst-case fee of 2000 uSTX per send (the random fees below + // top out just under that), each account needs to end up with + // 2001 * 25 * 10 = 500_250 uSTX. + // The 260 accounts in the middle will need to have + // (500250 + 180) * 26 = 13_011_180 uSTX. + // The 10 initial accounts will need to have + // (13011180 + 180) * 26 = 338_295_360 uSTX. + let initial_balance = 338_295_360; + for addr in initial_sender_addrs.iter() { + naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance); + } + // This will hold tuples for all of our senders, with the sender pk and + // the nonce + let mut senders = initial_sender_sks + .iter() + .map(|sk| (sk, 0)) + .collect::<Vec<_>>(); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mempool_db_path = format!( + "{}/nakamoto-neon/chainstate/mempool.sqlite", + naka_conf.node.working_dir + ); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_proposed_blocks, + ..
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, &counters); + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. + let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 13_011_180, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 500_250, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if 
account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..10 { + let db_tx = conn.transaction().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let fee = thread_rng().gen_range(180..2000); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + fee, + naka_conf.burnchain.chain_id, + &recipient, + 1, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + } + + info!("Sending transfers took {:?}", timer.elapsed()); + + let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); + + info!("Mining transfers"); + + let timer = Instant::now(); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(10, || { + let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); + Ok(blocks_proposed > blocks_proposed_before) + }) + .expect("Timed out waiting for first block to be mined"); + + info!("Mining first block of transfers took {:?}", timer.elapsed()); + + // Wait for the transfers to all be mined + wait_for(7200, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for transfers to be mined"); + + info!("Mining transfers took {:?}", timer.elapsed()); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 68b8474efb..e805df59cd 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -42,6 +42,10 @@ use stacks::cli; use stacks::codec::StacksMessageCodec; use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; +use stacks::core::util::{ + make_contract_call, make_contract_publish, make_contract_publish_microblock_only, + make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, +}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, @@ -78,11 +82,7 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms}; use super::bitcoin_regtest::BitcoinCoreController; -use super::{ - make_contract_call, make_contract_publish, make_contract_publish_microblock_only, - make_microblock, make_stacks_transfer, 
make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1, - SK_2, SK_3, -}; +use super::{ADDR_4, SK_1, SK_2, SK_3}; use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index edfdd5f7de..3ac0443edc 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -29,6 +29,8 @@ use libsigner::v0::messages::{ use libsigner::{ BlockProposal, BlockProposalData, SignerSession, StackerDBSession, VERSION_STRING, }; +use rand::{thread_rng, Rng}; +use rusqlite::Connection; use stacks::address::AddressHashMode; use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -42,6 +44,10 @@ use stacks::chainstate::stacks::miner::{TransactionEvent, TransactionSuccessEven use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig}; +use stacks::core::mempool::MemPoolWalkStrategy; +use stacks::core::util::{ + insert_tx_in_mempool, make_contract_call, make_contract_publish, make_stacks_transfer, +}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; @@ -98,9 +104,7 @@ use crate::tests::neon_integrations::{ get_sortition_info_ch, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; -use crate::tests::{ - self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, -}; +use crate::tests::{self, gen_random_port}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { @@ -156,7 +160,7 @@ impl SignerTest { .to_rsv(); let signer_pk = StacksPublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -4737,7 +4741,7 @@ fn signer_set_rollover() { .to_rsv(); let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -9226,7 +9230,7 @@ fn injected_signatures_are_ignored_across_boundaries() { .to_rsv(); let signer_pk = Secp256k1PublicKey::from_private(&new_signer_private_key); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( &new_signer_private_key, 0, 1000, @@ -12172,8 +12176,11 @@ fn transfers_in_block(block: &serde_json::Value) -> usize { let raw_tx = tx["raw_tx"].as_str().unwrap(); let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::TokenTransfer(..) = &parsed.payload { - count += 1; + if let TransactionPayload::TokenTransfer(_, amount, _) = &parsed.payload { + // Don't count phantom transactions, which have a 0 amount. + if *amount > 0 { + count += 1; + } } } count @@ -12438,3 +12445,557 @@ fn signer_can_accept_rejected_block() { signer_test.shutdown(); } + +/// This function intends to check the timing of the mempool iteration when +/// there are a large number of transactions in the mempool. 
It will boot to +/// epoch 3, fan out some STX transfers to a large number of accounts, wait for +/// these to all be mined, and then pause block mining, and submit a large +/// number of transactions to the mempool. It will then unpause block mining +/// and wait for the first block to be mined. Since the default miner +/// configuration specifies to spend 5 seconds mining a block, we expect that +/// this first block should be proposed within 10 seconds and approved within +/// 20 seconds. We also verify that the block contains at least 5,000 +/// transactions, since a lower count than that would indicate a clear +/// regression. Several tests below call this function, testing different +/// strategies and fees. +fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let transfer_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // Start with 10 accounts with initial balances. + let initial_sender_sks = (0..10) + .map(|_| StacksPrivateKey::random()) + .collect::>(); + let initial_sender_addrs = initial_sender_sks + .iter() + .map(|sk| tests::to_addr(sk)) + .collect::>(); + + // These 10 accounts will send to 25 accounts each, then those 260 accounts + // will send to 25 accounts each, for a total of 6760 accounts. + // At the end of the funding round, we want to have 6760 accounts with + // enough balance to send 1 uSTX 25 times. + // With a fee of 180 to 2000 uSTX per send, we need each account to have + // 2001 * 25 = 50_025 uSTX. + // The 260 accounts in the middle will need to have enough to send that + // amount to 25 other accounts, plus the fee, and then enough to send the + // transfers themselves as well: + // (50025 + 180) * 25 + 50025 = 1_305_150 uSTX. + // The 10 initial accounts will need to have enough to send that amount to + // 25 other accounts, plus enough to send the transfers themselves as well: + // (1305150 + 180) * 25 + 1305150 = 33_938_400 uSTX. + let initial_balance = 33_938_400; + let initial_balances = initial_sender_addrs + .iter() + .map(|addr| (addr.clone(), initial_balance)) + .collect::>(); + + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + |_| {}, + |conf| { + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + conf.miner.mempool_walk_strategy = strategy; + }, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + // This will hold tuples for all of our senders, with the sender pk and + // the nonce + let mut senders = initial_sender_sks + .iter() + .map(|sk| (sk, 0)) + .collect::>(); + + let mempool_db_path = format!( + "{}/nakamoto-neon/chainstate/mempool.sqlite", + signer_test.running_nodes.conf.node.working_dir + ); + let chain_id = signer_test.running_nodes.conf.burnchain.chain_id; + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. 
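Every wait in the funding rounds below uses the same convergence check: poll each sender's confirmed on-chain nonce until it has caught up with the locally tracked one. For readers unfamiliar with these test helpers, `wait_for` behaves roughly like this sketch (an illustration of its contract, not its actual code):

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

// Rough contract of the `wait_for` helper: retry `check` until it returns
// Ok(true), propagating errors and failing once the timeout elapses.
fn wait_until(timeout_secs: u64, mut check: impl FnMut() -> Result<bool, String>) -> Result<(), String> {
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    loop {
        if check()? {
            return Ok(());
        }
        if Instant::now() >= deadline {
            return Err("timed out".into());
        }
        sleep(Duration::from_millis(500));
    }
}
```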
+ let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 1_305_150, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 50_025, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let fee = set_fee(); + assert!(fee >= 180 && fee <= 2000); + let transfer_tx = make_stacks_transfer(sender_sk, *nonce, fee, chain_id, &recipient, 1); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + 
tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + + info!("Sending transfers took {:?}", timer.elapsed()); + + let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + let blocks_before = test_observer::get_blocks().len(); + + info!("Mining transfers..."); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(30, || { + let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len(); + Ok(proposed_blocks > proposed_blocks_before) + }) + .expect("Timed out waiting for first block to be mined"); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let last_block = blocks.last().unwrap(); + info!( + "First block contains {} transactions", + last_block.tx_events.len() + ); + if strategy == MemPoolWalkStrategy::NextNonceWithHighestFeeRate { + assert!(last_block.tx_events.len() > 5000); + } + + // Wait for the first block to be accepted. + wait_for(20, || { + let blocks = test_observer::get_blocks().len(); + Ok(blocks > blocks_before) + }) + .expect("Timed out waiting for first block to be accepted"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +fn large_mempool_original_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || 180); +} + +#[test] +#[ignore] +fn large_mempool_original_random_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || { + thread_rng().gen_range(180..2000) + }); +} + +#[test] +#[ignore] +fn large_mempool_next_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || 180); +} + +#[test] +#[ignore] +fn large_mempool_next_random_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || { + thread_rng().gen_range(180..2000) + }); +} + +#[test] +#[ignore] +/// This test intends to check the timing of the mempool iteration when there +/// are a large number of transactions in the mempool. It will boot to epoch 3, +/// fan out some STX transfers to a large number of accounts, wait for these to +/// all be mined, and then pause block mining and submit a large number of +/// transactions to the mempool from those accounts, all with the same fee. It +/// will then unpause block mining, wait for the first block to be proposed, +/// and log how many transactions it contains. Finally, it waits for the +/// entire mempool to be mined and reports how long that took. +fn larger_mempool() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let transfer_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // Start with 10 accounts with initial balances. + let initial_sender_sks = (0..10) + .map(|_| StacksPrivateKey::random()) + .collect::<Vec<_>>(); + let initial_sender_addrs = initial_sender_sks + .iter() + .map(|sk| tests::to_addr(sk)) + .collect::<Vec<_>>(); + + // These 10 accounts will send to 25 accounts each, then those 260 accounts + // will send to 25 accounts each, for a total of 6760 accounts. + // At the end of the funding round, we want to have 6760 accounts with + // enough balance to send 1 uSTX 25 times in each of 10 rounds of sends. + // Budgeting the worst-case fee of 2000 uSTX per send (the constant fee of + // 180 uSTX used here makes this a comfortable overestimate), each account + // needs to end up with 2001 * 25 * 10 = 500_250 uSTX.
+ // The 260 accounts in the middle will need to have + // (500250 + 180) * 26 = 13_011_180 uSTX. + // The 10 initial accounts will need to have + // (13011180 + 180) * 26 = 338_295_360 uSTX. + let initial_balance = 338_295_360; + let initial_balances = initial_sender_addrs + .iter() + .map(|addr| (addr.clone(), initial_balance)) + .collect::>(); + + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + |_| {}, + |conf| { + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + }, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + // This will hold tuples for all of our senders, with the sender pk and + // the nonce + let mut senders = initial_sender_sks + .iter() + .map(|sk| (sk, 0)) + .collect::>(); + + let mempool_db_path = format!( + "{}/nakamoto-neon/chainstate/mempool.sqlite", + signer_test.running_nodes.conf.node.working_dir + ); + let chain_id = signer_test.running_nodes.conf.burnchain.chain_id; + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. + let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 13_011_180, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 500_250, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + 
*nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..10 { + let db_tx = conn.transaction().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = + make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + } + + info!("Sending transfers took {:?}", timer.elapsed()); + + let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + info!("Mining transfers..."); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(30, || { + let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len(); + Ok(proposed_blocks > proposed_blocks_before) + }) + .expect("Timed out waiting for first block to be mined"); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let last_block = blocks.last().unwrap(); + info!( + "First block contains {} transactions", + last_block.tx_events.len() + ); + + // Wait for all of the transfers to be mined + wait_for(43200, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for transfers to be mined"); + + info!("Mining transfers took {:?}", timer.elapsed()); + signer_test.shutdown(); +}
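A closing note on what these tests compare: the two `MemPoolWalkStrategy` variants differ in which pending transactions the miner even considers at each step. The toy model below is inferred from the variant names and the assertions in these tests, not taken from the node's implementation, but it captures why the next-nonce walk fills the first block so much faster when every account has a long chain of queued nonces: only one transaction per account is minable at any instant, and the rest have future nonces.

```rust
#[derive(Clone, Copy)]
struct Pending {
    origin: usize, // index of the sending account
    nonce: u64,
    fee: u64,
}

// GlobalFeeRate (toy model): rank every pending tx by fee. The winner may
// not be minable yet -- its nonce can be far ahead of its account's next
// nonce -- so the miner repeatedly skips candidates, and that wasted work
// grows with the size of the mempool.
fn global_fee_rate(txs: &[Pending]) -> Option<Pending> {
    txs.iter().copied().max_by_key(|tx| tx.fee)
}

// NextNonceWithHighestFeeRate (toy model): only each account's next-nonce
// tx is a candidate, so every candidate selected is immediately minable.
fn next_nonce_highest_fee(txs: &[Pending], next_nonce: &[u64]) -> Option<Pending> {
    txs.iter()
        .copied()
        .filter(|tx| tx.nonce == next_nonce[tx.origin])
        .max_by_key(|tx| tx.fee)
}

fn main() {
    // Account 1's highest-fee transfer is not yet minable (future nonce).
    let txs = [
        Pending { origin: 0, nonce: 0, fee: 300 },
        Pending { origin: 1, nonce: 0, fee: 200 },
        Pending { origin: 1, nonce: 1, fee: 1999 },
    ];
    let next = [0u64, 0u64];
    assert_eq!(global_fee_rate(&txs).unwrap().fee, 1999); // skipped at mining time
    assert_eq!(next_nonce_highest_fee(&txs, &next).unwrap().fee, 300); // minable now
}
```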