diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index c80e96005c8829..9aac0b1689b9c7 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -5,7 +5,6 @@ use { log::*, rand::{thread_rng, Rng}, rayon::prelude::*, - solana_client::connection_cache::ConnectionCache, solana_core::{ banking_stage::BankingStage, banking_trace::{BankingPacketBatch, BankingTracer, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT}, @@ -35,7 +34,6 @@ use { transaction::Transaction, }, solana_streamer::socket::SocketAddrSpace, - solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE, std::{ sync::{atomic::Ordering, Arc, RwLock}, thread::sleep, @@ -449,17 +447,6 @@ fn main() { ClusterInfo::new(node.info, keypair, SocketAddrSpace::Unspecified) }; let cluster_info = Arc::new(cluster_info); - let tpu_disable_quic = matches.is_present("tpu_disable_quic"); - let connection_cache = match tpu_disable_quic { - false => ConnectionCache::new_quic( - "connection_cache_banking_bench_quic", - DEFAULT_TPU_CONNECTION_POOL_SIZE, - ), - true => ConnectionCache::with_udp( - "connection_cache_banking_bench_udp", - DEFAULT_TPU_CONNECTION_POOL_SIZE, - ), - }; let banking_stage = BankingStage::new_num_threads( block_production_method, &cluster_info, @@ -471,10 +458,8 @@ fn main() { None, replay_vote_sender, None, - Arc::new(connection_cache), bank_forks.clone(), &Arc::new(PrioritizationFeeCache::new(0u64)), - false, ); // This is so that the signal_receiver does not go out of scope after the closure. diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 3e2d5572e4e761..f54c9aa03ac827 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -13,7 +13,6 @@ use { log::*, rand::{thread_rng, Rng}, rayon::prelude::*, - solana_client::connection_cache::ConnectionCache, solana_core::{ banking_stage::{ committer::Committer, @@ -300,10 +299,8 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { None, s, None, - Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), - false, ); let chunk_len = verified.len() / CHUNKS; diff --git a/core/benches/forwarder.rs b/core/benches/forwarder.rs deleted file mode 100644 index 10a050f3d97d4b..00000000000000 --- a/core/benches/forwarder.rs +++ /dev/null @@ -1,197 +0,0 @@ -#![feature(test)] -extern crate test; -use { - itertools::Itertools, - solana_client::connection_cache::ConnectionCache, - solana_core::{ - banking_stage::{ - forwarder::Forwarder, - leader_slot_metrics::LeaderSlotMetricsTracker, - unprocessed_packet_batches::{DeserializedPacket, UnprocessedPacketBatches}, - unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage}, - BankingStageStats, - }, - tracer_packet_stats::TracerPacketStats, - }, - solana_gossip::cluster_info::{ClusterInfo, Node}, - solana_ledger::{ - blockstore::Blockstore, - genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - }, - solana_perf::{data_budget::DataBudget, packet::Packet}, - solana_poh::{poh_recorder::create_test_recorder, poh_service::PohService}, - solana_runtime::{bank::Bank, genesis_utils::bootstrap_validator_stake_lamports}, - solana_sdk::{poh_config::PohConfig, signature::Keypair, signer::Signer, system_transaction}, - solana_streamer::socket::SocketAddrSpace, - std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - tempfile::TempDir, - test::Bencher, -}; - -struct BenchSetup { - exit: Arc, - poh_service: PohService, - forwarder: Forwarder>, - 
unprocessed_packet_batches: UnprocessedTransactionStorage, - tracker: LeaderSlotMetricsTracker, - stats: BankingStageStats, - tracer_stats: TracerPacketStats, -} - -fn setup(num_packets: usize, contentious_transaction: bool) -> BenchSetup { - let validator_keypair = Arc::new(Keypair::new()); - let genesis_config_info = create_genesis_config_with_leader( - 10_000, - &validator_keypair.pubkey(), - bootstrap_validator_stake_lamports(), - ); - let GenesisConfigInfo { genesis_config, .. } = &genesis_config_info; - - let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(genesis_config); - - let ledger_path = TempDir::new().unwrap(); - let blockstore = Arc::new( - Blockstore::open(ledger_path.as_ref()) - .expect("Expected to be able to open database ledger"), - ); - let poh_config = PohConfig { - // limit tick count to avoid clearing working_bank at - // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage - target_tick_count: Some(bank.max_tick_height().saturating_sub(1)), - ..PohConfig::default() - }; - - let (exit, poh_recorder, poh_service, _entry_receiver) = - create_test_recorder(bank, blockstore, Some(poh_config), None); - - let local_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey()); - let cluster_info = ClusterInfo::new( - local_node.info.clone(), - validator_keypair, - SocketAddrSpace::Unspecified, - ); - let cluster_info = Arc::new(cluster_info); - let min_balance = genesis_config.rent.minimum_balance(0); - let hash = genesis_config.hash(); - - // packets are deserialized upon receiving, failed packets will not be - // forwarded; Therefore need to create real packets here. - let keypair = Keypair::new(); - let packets = (0..num_packets) - .map(|_| { - let mut transaction = - system_transaction::transfer(&keypair, &Keypair::new().pubkey(), min_balance, hash); - if !contentious_transaction { - transaction.message.account_keys[0] = solana_sdk::pubkey::Pubkey::new_unique(); - } - let mut packet = Packet::from_data(None, transaction).unwrap(); - packet.meta_mut().set_tracer(true); - packet.meta_mut().set_from_staked_node(true); - DeserializedPacket::new(packet).unwrap() - }) - .collect_vec(); - - let unprocessed_packet_batches = UnprocessedTransactionStorage::new_transaction_storage( - UnprocessedPacketBatches::from_iter(packets, num_packets), - ThreadType::Transactions, - ); - - let connection_cache = ConnectionCache::new("connection_cache_test"); - // use a restrictive data budget to bench everything except actual sending data over - // connection. 
- let data_budget = DataBudget::restricted(); - let forwarder = Forwarder::new( - poh_recorder, - bank_forks, - cluster_info, - Arc::new(connection_cache), - Arc::new(data_budget), - ); - - BenchSetup { - exit, - poh_service, - forwarder, - unprocessed_packet_batches, - tracker: LeaderSlotMetricsTracker::new(0), - stats: BankingStageStats::default(), - tracer_stats: TracerPacketStats::new(0), - } -} - -#[bench] -fn bench_forwarder_handle_forwading_contentious_transaction(bencher: &mut Bencher) { - let num_packets = 10240; - let BenchSetup { - exit, - poh_service, - mut forwarder, - mut unprocessed_packet_batches, - mut tracker, - stats, - mut tracer_stats, - } = setup(num_packets, true); - - // hold packets so they can be reused for benching - let hold = true; - bencher.iter(|| { - forwarder.handle_forwarding( - &mut unprocessed_packet_batches, - hold, - &mut tracker, - &stats, - &mut tracer_stats, - ); - // reset packet.forwarded flag to reuse `unprocessed_packet_batches` - if let UnprocessedTransactionStorage::LocalTransactionStorage(unprocessed_packets) = - &mut unprocessed_packet_batches - { - for deserialized_packet in unprocessed_packets.iter_mut() { - deserialized_packet.forwarded = false; - } - } - }); - - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); -} - -#[bench] -fn bench_forwarder_handle_forwading_parallel_transactions(bencher: &mut Bencher) { - let num_packets = 10240; - let BenchSetup { - exit, - poh_service, - mut forwarder, - mut unprocessed_packet_batches, - mut tracker, - stats, - mut tracer_stats, - } = setup(num_packets, false); - - // hold packets so they can be reused for benching - let hold = true; - bencher.iter(|| { - forwarder.handle_forwarding( - &mut unprocessed_packet_batches, - hold, - &mut tracker, - &stats, - &mut tracer_stats, - ); - // reset packet.forwarded flag to reuse `unprocessed_packet_batches` - if let UnprocessedTransactionStorage::LocalTransactionStorage(unprocessed_packets) = - &mut unprocessed_packet_batches - { - for deserialized_packet in unprocessed_packets.iter_mut() { - deserialized_packet.forwarded = false; - } - } - }); - - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); -} diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index 3f11cc150574d3..1d18b8cdceb36c 100644 --- a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -158,7 +158,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) { trace!("start"); let (packet_s, packet_r) = unbounded(); let (verified_s, verified_r) = BankingTracer::channel_for_test(); - let verifier = TransactionSigVerifier::new(verified_s); + let verifier = TransactionSigVerifier::new(verified_s, None); let stage = SigVerifyStage::new(packet_r, verifier, "solSigVerBench", "bench"); bencher.iter(move || { @@ -237,7 +237,7 @@ fn prepare_batches(discard_factor: i32) -> (Vec, usize) { fn bench_shrink_sigverify_stage_core(bencher: &mut Bencher, discard_factor: i32) { let (batches0, num_valid_packets) = prepare_batches(discard_factor); let (verified_s, _verified_r) = BankingTracer::channel_for_test(); - let verifier = TransactionSigVerifier::new(verified_s); + let verifier = TransactionSigVerifier::new(verified_s, None); let mut c = 0; let mut total_shrink_time = 0; diff --git a/core/src/banking_simulation.rs b/core/src/banking_simulation.rs index a8a67b3e5653b2..4853befbe8c275 100644 --- a/core/src/banking_simulation.rs +++ b/core/src/banking_simulation.rs @@ -12,7 +12,6 @@ use { 
crossbeam_channel::{unbounded, Sender}, itertools::Itertools, log::*, - solana_client::connection_cache::ConnectionCache, solana_gossip::{ cluster_info::{ClusterInfo, Node}, contact_info::ContactInfo, @@ -762,7 +761,6 @@ impl BankingSimulator { let (tpu_vote_sender, tpu_vote_receiver) = retracer.create_channel_tpu_vote(); let (gossip_vote_sender, gossip_vote_receiver) = retracer.create_channel_gossip_vote(); - let connection_cache = Arc::new(ConnectionCache::new("connection_cache_sim")); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded(); let shred_version = compute_shred_version( @@ -811,10 +809,8 @@ impl BankingSimulator { None, replay_vote_sender, None, - connection_cache, bank_forks.clone(), prioritization_fee_cache, - false, ); let (&_slot, &raw_base_event_time) = freeze_time_by_slot diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 966d598e9d8049..fa87e6d0bf3d74 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -7,7 +7,6 @@ use { committer::Committer, consumer::Consumer, decision_maker::{BufferedPacketsDecision, DecisionMaker}, - forwarder::Forwarder, latest_unprocessed_votes::{LatestUnprocessedVotes, VoteSource}, leader_slot_metrics::LeaderSlotMetricsTracker, packet_receiver::PacketReceiver, @@ -30,11 +29,10 @@ use { }, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, histogram::Histogram, - solana_client::connection_cache::ConnectionCache, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_ledger::blockstore_processor::TransactionStatusSender, solana_measure::measure_us, - solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH}, + solana_perf::packet::PACKETS_PER_BATCH, solana_poh::poh_recorder::{PohRecorder, TransactionRecorder}, solana_runtime::{ bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, @@ -56,7 +54,6 @@ use { // Below modules are pub to allow use by banking_stage bench pub mod committer; pub mod consumer; -pub mod forwarder; pub mod leader_slot_metrics; pub mod qos_service; pub mod unprocessed_packet_batches; @@ -64,7 +61,6 @@ pub mod unprocessed_transaction_storage; mod consume_worker; mod decision_maker; -mod forward_packet_batches_by_accounts; mod immutable_deserialized_packet; mod latest_unprocessed_votes; mod leader_slot_timing_metrics; @@ -100,8 +96,6 @@ pub struct BankingStageStats { current_buffered_packets_count: AtomicUsize, rebuffered_packets_count: AtomicUsize, consumed_buffered_packets_count: AtomicUsize, - forwarded_transaction_count: AtomicUsize, - forwarded_vote_count: AtomicUsize, batch_packet_indexes_len: Histogram, // Timing @@ -146,8 +140,6 @@ impl BankingStageStats { + self.filter_pending_packets_elapsed.load(Ordering::Relaxed) + self.packet_conversion_elapsed.load(Ordering::Relaxed) + self.transaction_processing_elapsed.load(Ordering::Relaxed) - + self.forwarded_transaction_count.load(Ordering::Relaxed) as u64 - + self.forwarded_vote_count.load(Ordering::Relaxed) as u64 + self.batch_packet_indexes_len.entries() } @@ -211,16 +203,6 @@ impl BankingStageStats { .swap(0, Ordering::Relaxed), i64 ), - ( - "forwarded_transaction_count", - self.forwarded_transaction_count.swap(0, Ordering::Relaxed), - i64 - ), - ( - "forwarded_vote_count", - self.forwarded_vote_count.swap(0, Ordering::Relaxed), - i64 - ), ( "consume_buffered_packets_elapsed", self.consume_buffered_packets_elapsed @@ -312,16 +294,6 @@ pub enum ForwardOption { ForwardTransaction, } 
-#[derive(Debug, Default)] -pub struct FilterForwardingResults { - pub(crate) total_forwardable_packets: usize, - pub(crate) total_tracer_packets_in_buffer: usize, - pub(crate) total_forwardable_tracer_packets: usize, - pub(crate) total_dropped_packets: usize, - pub(crate) total_packet_conversion_us: u64, - pub(crate) total_filter_packets_us: u64, -} - pub trait LikeClusterInfo: Send + Sync + 'static + Clone { fn id(&self) -> Pubkey; @@ -356,10 +328,8 @@ impl BankingStage { transaction_status_sender: Option, replay_vote_sender: ReplayVoteSender, log_messages_bytes_limit: Option, - connection_cache: Arc, bank_forks: Arc>, prioritization_fee_cache: &Arc, - enable_forwarding: bool, ) -> Self { Self::new_num_threads( block_production_method, @@ -372,10 +342,8 @@ impl BankingStage { transaction_status_sender, replay_vote_sender, log_messages_bytes_limit, - connection_cache, bank_forks, prioritization_fee_cache, - enable_forwarding, ) } @@ -391,10 +359,8 @@ impl BankingStage { transaction_status_sender: Option, replay_vote_sender: ReplayVoteSender, log_messages_bytes_limit: Option, - connection_cache: Arc, bank_forks: Arc>, prioritization_fee_cache: &Arc, - enable_forwarding: bool, ) -> Self { match block_production_method { BlockProductionMethod::ThreadLocalMultiIterator => { @@ -408,7 +374,6 @@ impl BankingStage { transaction_status_sender, replay_vote_sender, log_messages_bytes_limit, - connection_cache, bank_forks, prioritization_fee_cache, ) @@ -423,10 +388,8 @@ impl BankingStage { transaction_status_sender, replay_vote_sender, log_messages_bytes_limit, - connection_cache, bank_forks, prioritization_fee_cache, - enable_forwarding, ), } } @@ -442,7 +405,6 @@ impl BankingStage { transaction_status_sender: Option, replay_vote_sender: ReplayVoteSender, log_messages_bytes_limit: Option, - connection_cache: Arc, bank_forks: Arc>, prioritization_fee_cache: &Arc, ) -> Self { @@ -450,7 +412,6 @@ impl BankingStage { // Single thread to generate entries from many banks. // This thread talks to poh_service and broadcasts the entries once they have been recorded. // Once an entry has been recorded, its blockhash is registered with the bank. - let data_budget = Arc::new(DataBudget::default()); let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - NUM_VOTE_PROCESSING_THREADS) as usize); // Keeps track of extraneous vote transactions for the vote threads @@ -494,14 +455,6 @@ impl BankingStage { ), }; - let forwarder = Forwarder::new( - poh_recorder.clone(), - bank_forks.clone(), - cluster_info.clone(), - connection_cache.clone(), - data_budget.clone(), - ); - Self::spawn_thread_local_multi_iterator_thread( id, packet_receiver, @@ -509,7 +462,7 @@ impl BankingStage { committer.clone(), transaction_recorder.clone(), log_messages_bytes_limit, - forwarder, + bank_forks.clone(), unprocessed_transaction_storage, ) }) @@ -528,16 +481,13 @@ impl BankingStage { transaction_status_sender: Option, replay_vote_sender: ReplayVoteSender, log_messages_bytes_limit: Option, - connection_cache: Arc, bank_forks: Arc>, prioritization_fee_cache: &Arc, - enable_forwarding: bool, ) -> Self { assert!(num_threads >= MIN_TOTAL_THREADS); // Single thread to generate entries from many banks. // This thread talks to poh_service and broadcasts the entries once they have been recorded. // Once an entry has been recorded, its blockhash is registered with the bank. 
- let data_budget = Arc::new(DataBudget::default()); // Keeps track of extraneous vote transactions for the vote threads let latest_unprocessed_votes = { let bank = bank_forks.read().unwrap().working_bank(); @@ -567,13 +517,7 @@ impl BankingStage { committer.clone(), transaction_recorder.clone(), log_messages_bytes_limit, - Forwarder::new( - poh_recorder.clone(), - bank_forks.clone(), - cluster_info.clone(), - connection_cache.clone(), - data_budget.clone(), - ), + bank_forks.clone(), UnprocessedTransactionStorage::new_vote_storage( latest_unprocessed_votes.clone(), vote_source, @@ -615,16 +559,6 @@ impl BankingStage { ) } - let forwarder = enable_forwarding.then(|| { - Forwarder::new( - poh_recorder.clone(), - bank_forks.clone(), - cluster_info.clone(), - connection_cache.clone(), - data_budget.clone(), - ) - }); - // Spawn the central scheduler thread bank_thread_hdls.push({ let packet_deserializer = PacketDeserializer::new(non_vote_receiver); @@ -635,7 +569,6 @@ impl BankingStage { bank_forks, scheduler, worker_metrics, - forwarder, ); Builder::new() .name("solBnkTxSched".to_string()) @@ -652,14 +585,14 @@ impl BankingStage { Self { bank_thread_hdls } } - fn spawn_thread_local_multi_iterator_thread( + fn spawn_thread_local_multi_iterator_thread( id: u32, packet_receiver: BankingPacketReceiver, decision_maker: DecisionMaker, committer: Committer, transaction_recorder: TransactionRecorder, log_messages_bytes_limit: Option, - mut forwarder: Forwarder, + bank_forks: Arc>, unprocessed_transaction_storage: UnprocessedTransactionStorage, ) -> JoinHandle<()> { let mut packet_receiver = PacketReceiver::new(id, packet_receiver); @@ -676,8 +609,8 @@ impl BankingStage { Self::process_loop( &mut packet_receiver, &decision_maker, - &mut forwarder, &consumer, + &bank_forks, id, unprocessed_transaction_storage, ) @@ -686,14 +619,12 @@ impl BankingStage { } #[allow(clippy::too_many_arguments)] - fn process_buffered_packets( + fn process_buffered_packets( decision_maker: &DecisionMaker, - forwarder: &mut Forwarder, consumer: &Consumer, unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, banking_stage_stats: &BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, - tracer_packet_stats: &mut TracerPacketStats, ) { if unprocessed_transaction_storage.should_not_process() { return; @@ -724,27 +655,13 @@ impl BankingStage { .increment_consume_buffered_packets_us(consume_buffered_packets_us); } BufferedPacketsDecision::Forward => { - let ((), forward_us) = measure_us!(forwarder.handle_forwarding( - unprocessed_transaction_storage, - false, - slot_metrics_tracker, - banking_stage_stats, - tracer_packet_stats, - )); - slot_metrics_tracker.increment_forward_us(forward_us); + unprocessed_transaction_storage.clear(); // Take metrics action after forwarding packets to include forwarded // metrics into current slot slot_metrics_tracker.apply_action(metrics_action); } BufferedPacketsDecision::ForwardAndHold => { - let ((), forward_and_hold_us) = measure_us!(forwarder.handle_forwarding( - unprocessed_transaction_storage, - true, - slot_metrics_tracker, - banking_stage_stats, - tracer_packet_stats, - )); - slot_metrics_tracker.increment_forward_and_hold_us(forward_and_hold_us); + // todo!("clean packets"); // Take metrics action after forwarding packets slot_metrics_tracker.apply_action(metrics_action); } @@ -752,11 +669,11 @@ impl BankingStage { } } - fn process_loop( + fn process_loop( packet_receiver: &mut PacketReceiver, decision_maker: &DecisionMaker, - forwarder: &mut Forwarder, 
consumer: &Consumer, + bank_forks: &RwLock, id: u32, mut unprocessed_transaction_storage: UnprocessedTransactionStorage, ) { @@ -772,12 +689,10 @@ impl BankingStage { { let (_, process_buffered_packets_us) = measure_us!(Self::process_buffered_packets( decision_maker, - forwarder, consumer, &mut unprocessed_transaction_storage, &banking_stage_stats, &mut slot_metrics_tracker, - &mut tracer_packet_stats, )); slot_metrics_tracker .increment_process_buffered_packets_us(process_buffered_packets_us); @@ -786,6 +701,11 @@ impl BankingStage { tracer_packet_stats.report(1000); + if !unprocessed_transaction_storage.should_not_process() { + unprocessed_transaction_storage + .cache_epoch_boundary_info(&bank_forks.read().unwrap().working_bank()); + } + match packet_receiver.receive_and_buffer_packets( &mut unprocessed_transaction_storage, &mut banking_stage_stats, @@ -904,10 +824,8 @@ mod tests { None, replay_vote_sender, None, - Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), - false, ); drop(non_vote_sender); drop(tpu_vote_sender); @@ -960,10 +878,8 @@ mod tests { None, replay_vote_sender, None, - Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), - false, ); trace!("sending bank"); drop(non_vote_sender); @@ -1040,10 +956,8 @@ mod tests { None, replay_vote_sender, None, - Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks.clone(), // keep a local-copy of bank-forks so worker threads do not lose weak access to bank-forks &Arc::new(PrioritizationFeeCache::new(0u64)), - false, ); // fund another account so we can send 2 good transactions in a single batch. @@ -1211,7 +1125,6 @@ mod tests { None, replay_vote_sender, None, - Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), ); @@ -1402,10 +1315,8 @@ mod tests { None, replay_vote_sender, None, - Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), - false, ); let keypairs = (0..100).map(|_| Keypair::new()).collect_vec(); diff --git a/core/src/banking_stage/forward_packet_batches_by_accounts.rs b/core/src/banking_stage/forward_packet_batches_by_accounts.rs deleted file mode 100644 index 67b323c2876a18..00000000000000 --- a/core/src/banking_stage/forward_packet_batches_by_accounts.rs +++ /dev/null @@ -1,481 +0,0 @@ -use { - super::immutable_deserialized_packet::ImmutableDeserializedPacket, - solana_cost_model::{ - block_cost_limits, - cost_model::CostModel, - cost_tracker::{CostTracker, UpdatedCosts}, - transaction_cost::TransactionCost, - }, - solana_feature_set::FeatureSet, - solana_perf::packet::Packet, - solana_sdk::transaction::SanitizedTransaction, - solana_svm_transaction::svm_message::SVMMessage, - std::sync::Arc, -}; - -/// `ForwardBatch` to have half of default cost_tracker limits, as smaller batch -/// allows better granularity in composing forwarding transactions; e.g., -/// transactions in each batch are potentially more evenly distributed across accounts. -const FORWARDED_BLOCK_COMPUTE_RATIO: u32 = 2; -/// this number divided by`FORWARDED_BLOCK_COMPUTE_RATIO` is the total blocks to forward. -/// To accommodate transactions without `compute_budget` instruction, which will -/// have default 200_000 compute units, it has 100 batches as default to forward -/// up to 12_000 such transaction. 
(120 such transactions fill up a batch, 100 -/// batches allows 12_000 transactions) -const DEFAULT_NUMBER_OF_BATCHES: u32 = 100; - -/// `ForwardBatch` represents one forwardable batch of transactions with a -/// limited number of total compute units -#[derive(Debug, Default)] -pub struct ForwardBatch { - // `forwardable_packets` keeps forwardable packets in a vector in its - // original fee prioritized order - forwardable_packets: Vec>, -} - -impl ForwardBatch { - pub fn get_forwardable_packets(&self) -> impl Iterator { - self.forwardable_packets - .iter() - .map(|immutable_packet| immutable_packet.original_packet()) - } - - pub fn len(&self) -> usize { - self.forwardable_packets.len() - } - - pub fn is_empty(&self) -> bool { - self.forwardable_packets.is_empty() - } - - pub fn reset(&mut self) { - self.forwardable_packets.clear(); - } -} - -/// To avoid forward queue being saturated by transactions for single hot account, -/// the forwarder will group and send prioritized transactions by account limit -/// to allow transactions on non-congested accounts to be forwarded alongside higher fee -/// transactions that saturate those highly demanded accounts. -#[derive(Debug)] -pub struct ForwardPacketBatchesByAccounts { - // Forwardable packets are staged in number of batches, each batch is limited - // by cost_tracker on both account limit and block limits. Those limits are - // set as `limit_ratio` of regular block limits to facilitate quicker iteration. - forward_batches: Vec, - - // Single cost tracker that imposes the total cu limits for forwarding. - cost_tracker: CostTracker, - - // Compute Unit limits for each batch - batch_vote_limit: u64, - batch_block_limit: u64, - batch_account_limit: u64, -} - -impl ForwardPacketBatchesByAccounts { - pub fn new_with_default_batch_limits() -> Self { - Self::new(FORWARDED_BLOCK_COMPUTE_RATIO, DEFAULT_NUMBER_OF_BATCHES) - } - - pub fn new(limit_ratio: u32, number_of_batches: u32) -> Self { - let forward_batches = (0..number_of_batches) - .map(|_| ForwardBatch::default()) - .collect(); - - let batch_vote_limit = block_cost_limits::MAX_VOTE_UNITS.saturating_div(limit_ratio as u64); - let batch_block_limit = - block_cost_limits::MAX_BLOCK_UNITS.saturating_div(limit_ratio as u64); - let batch_account_limit = - block_cost_limits::MAX_WRITABLE_ACCOUNT_UNITS.saturating_div(limit_ratio as u64); - - let mut cost_tracker = CostTracker::default(); - cost_tracker.set_limits( - batch_account_limit.saturating_mul(number_of_batches as u64), - batch_block_limit.saturating_mul(number_of_batches as u64), - batch_vote_limit.saturating_mul(number_of_batches as u64), - ); - Self { - forward_batches, - cost_tracker, - batch_vote_limit, - batch_block_limit, - batch_account_limit, - } - } - - pub fn try_add_packet( - &mut self, - sanitized_transaction: &SanitizedTransaction, - immutable_packet: Arc, - feature_set: &FeatureSet, - ) -> bool { - let tx_cost = CostModel::calculate_cost(sanitized_transaction, feature_set); - - if let Ok(updated_costs) = self.cost_tracker.try_add(&tx_cost) { - let batch_index = self.get_batch_index_by_updated_costs(&tx_cost, &updated_costs); - - if let Some(forward_batch) = self.forward_batches.get_mut(batch_index) { - forward_batch.forwardable_packets.push(immutable_packet); - } else { - // A successfully added tx_cost means it does not exceed block limit, nor vote - // limit, nor account limit. batch_index calculated as quotient from division - // will not be out of bounds. 
- unreachable!("batch_index out of bounds"); - } - true - } else { - false - } - } - - pub fn iter_batches(&self) -> impl Iterator { - self.forward_batches.iter() - } - - pub fn reset(&mut self) { - for forward_batch in self.forward_batches.iter_mut() { - forward_batch.reset(); - } - - self.cost_tracker.reset(); - } - - // Successfully added packet should be placed into the batch where no block/vote/account limits - // would be exceeded. Eg, if by block limit, it can be put into batch #1; by vote limit, it can - // be put into batch #2; and by account limit, it can be put into batch #3; then it should be - // put into batch #3 to satisfy all batch limits. - fn get_batch_index_by_updated_costs( - &self, - tx_cost: &TransactionCost, - updated_costs: &UpdatedCosts, - ) -> usize { - let Some(batch_index_by_block_limit) = - updated_costs.updated_block_cost.checked_div(match tx_cost { - TransactionCost::SimpleVote { .. } => self.batch_vote_limit, - TransactionCost::Transaction(_) => self.batch_block_limit, - }) - else { - unreachable!("batch vote limit or block limit must not be zero") - }; - - let batch_index_by_account_limit = - updated_costs.updated_costliest_account_cost / self.batch_account_limit; - - batch_index_by_block_limit.max(batch_index_by_account_limit) as usize - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - crate::banking_stage::unprocessed_packet_batches::DeserializedPacket, - solana_cost_model::transaction_cost::{UsageCostDetails, WritableKeysTransaction}, - solana_feature_set::FeatureSet, - solana_sdk::{ - compute_budget::ComputeBudgetInstruction, - message::{Message, TransactionSignatureDetails}, - pubkey::Pubkey, - system_instruction, - transaction::Transaction, - }, - }; - - /// build test transaction, return corresponding sanitized_transaction and deserialized_packet, - /// and the batch limit_ratio that would only allow one transaction per bucket. 
- fn build_test_transaction_and_packet( - priority: u64, - write_to_account: &Pubkey, - ) -> (SanitizedTransaction, DeserializedPacket, u32) { - let from_account = solana_sdk::pubkey::new_rand(); - - let transaction = Transaction::new_unsigned(Message::new( - &[ - ComputeBudgetInstruction::set_compute_unit_price(priority), - system_instruction::transfer(&from_account, write_to_account, 2), - ], - Some(&from_account), - )); - let sanitized_transaction = - SanitizedTransaction::from_transaction_for_tests(transaction.clone()); - let tx_cost = CostModel::calculate_cost(&sanitized_transaction, &FeatureSet::all_enabled()); - let cost = tx_cost.sum(); - let deserialized_packet = - DeserializedPacket::new(Packet::from_data(None, transaction).unwrap()).unwrap(); - - // set limit ratio so each batch can only have one test transaction - let limit_ratio: u32 = - ((block_cost_limits::MAX_WRITABLE_ACCOUNT_UNITS - cost + 1) / cost) as u32; - (sanitized_transaction, deserialized_packet, limit_ratio) - } - - fn zero_transaction_cost() -> TransactionCost<'static, WritableKeysTransaction> { - static DUMMY_TRANSACTION: WritableKeysTransaction = WritableKeysTransaction(vec![]); - - TransactionCost::Transaction(UsageCostDetails { - transaction: &DUMMY_TRANSACTION, - signature_cost: 0, - write_lock_cost: 0, - data_bytes_cost: 0, - programs_execution_cost: 0, - loaded_accounts_data_size_cost: 0, - allocated_accounts_data_size: 0, - signature_details: TransactionSignatureDetails::new(0, 0, 0), - }) - } - - #[test] - fn test_try_add_packet_to_multiple_batches() { - // setup two transactions, one has high priority that writes to hot account, the - // other write to non-contentious account with no priority - let hot_account = solana_sdk::pubkey::new_rand(); - let other_account = solana_sdk::pubkey::new_rand(); - let (tx_high_priority, packet_high_priority, limit_ratio) = - build_test_transaction_and_packet(10, &hot_account); - let (tx_low_priority, packet_low_priority, _) = - build_test_transaction_and_packet(0, &other_account); - - // setup forwarding with 2 buckets, each only allow one transaction - let number_of_batches = 2; - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new(limit_ratio, number_of_batches); - - // Assert initially both batches are empty - { - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(0, batches.next().unwrap().len()); - assert_eq!(0, batches.next().unwrap().len()); - assert!(batches.next().is_none()); - } - - // Assert one high-priority packet will be added to 1st bucket successfully - { - assert!(forward_packet_batches_by_accounts.try_add_packet( - &tx_high_priority, - packet_high_priority.immutable_section().clone(), - &FeatureSet::all_enabled(), - )); - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(1, batches.next().unwrap().len()); - assert_eq!(0, batches.next().unwrap().len()); - assert!(batches.next().is_none()); - } - - // Assert second high-priority packet will not fit in first bucket, but will - // be added to 2nd bucket - { - assert!(forward_packet_batches_by_accounts.try_add_packet( - &tx_high_priority, - packet_high_priority.immutable_section().clone(), - &FeatureSet::all_enabled(), - )); - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(1, batches.next().unwrap().len()); - assert_eq!(1, batches.next().unwrap().len()); - } - - // Assert 3rd high-priority packet can not added since both buckets would - // exceed hot-account limit - { - 
assert!(!forward_packet_batches_by_accounts.try_add_packet( - &tx_high_priority, - packet_high_priority.immutable_section().clone(), - &FeatureSet::all_enabled(), - )); - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(1, batches.next().unwrap().len()); - assert_eq!(1, batches.next().unwrap().len()); - assert!(batches.next().is_none()); - } - - // Assert lower priority packet will be successfully added to first bucket - // since non-contentious account is still free - { - assert!(forward_packet_batches_by_accounts.try_add_packet( - &tx_low_priority, - packet_low_priority.immutable_section().clone(), - &FeatureSet::all_enabled(), - )); - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(2, batches.next().unwrap().len()); - assert_eq!(1, batches.next().unwrap().len()); - assert!(batches.next().is_none()); - } - } - - #[test] - fn test_try_add_packet_to_single_batch() { - let (tx, packet, limit_ratio) = - build_test_transaction_and_packet(10, &solana_sdk::pubkey::new_rand()); - let number_of_batches = 1; - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new(limit_ratio, number_of_batches); - - // Assert initially batch is empty, and accepting new packets - { - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(0, batches.next().unwrap().len()); - assert!(batches.next().is_none()); - } - - // Assert can successfully add first packet to forwarding buffer - { - assert!(forward_packet_batches_by_accounts.try_add_packet( - &tx, - packet.immutable_section().clone(), - &FeatureSet::all_enabled() - )); - - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(1, batches.next().unwrap().len()); - } - - // Assert cannot add same packet to forwarding buffer again, due to reached account limit; - { - assert!(!forward_packet_batches_by_accounts.try_add_packet( - &tx, - packet.immutable_section().clone(), - &FeatureSet::all_enabled() - )); - - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(1, batches.next().unwrap().len()); - } - - // Assert can still add non-contentious packet to same batch - { - // build a small packet to a non-contentious account with high priority - let (tx2, packet2, _) = - build_test_transaction_and_packet(100, &solana_sdk::pubkey::new_rand()); - - assert!(forward_packet_batches_by_accounts.try_add_packet( - &tx2, - packet2.immutable_section().clone(), - &FeatureSet::all_enabled() - )); - - let mut batches = forward_packet_batches_by_accounts.iter_batches(); - assert_eq!(2, batches.next().unwrap().len()); - } - } - - #[test] - fn test_get_batch_index_by_updated_costs() { - let test_cost = 99; - - // check against vote limit only - { - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - forward_packet_batches_by_accounts.batch_vote_limit = test_cost + 1; - - let dummy_transaction = WritableKeysTransaction(vec![]); - let transaction_cost = TransactionCost::SimpleVote { - transaction: &dummy_transaction, - }; - assert_eq!( - 0, - forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( - &transaction_cost, - &UpdatedCosts { - updated_block_cost: test_cost, - updated_costliest_account_cost: 0 - } - ) - ); - assert_eq!( - 1, - forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( - &transaction_cost, - &UpdatedCosts { - updated_block_cost: test_cost + 1, - updated_costliest_account_cost: 0 - } - ) - ); - } - - // 
check against block limit only - { - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - forward_packet_batches_by_accounts.batch_block_limit = test_cost + 1; - - let transaction_cost = zero_transaction_cost(); - assert_eq!( - 0, - forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( - &transaction_cost, - &UpdatedCosts { - updated_block_cost: test_cost, - updated_costliest_account_cost: 0 - } - ) - ); - assert_eq!( - 1, - forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( - &transaction_cost, - &UpdatedCosts { - updated_block_cost: test_cost + 1, - updated_costliest_account_cost: 0 - } - ) - ); - } - - // check against account limit only - { - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - forward_packet_batches_by_accounts.batch_account_limit = test_cost + 1; - - let transaction_cost = zero_transaction_cost(); - assert_eq!( - 0, - forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( - &transaction_cost, - &UpdatedCosts { - updated_block_cost: 0, - updated_costliest_account_cost: test_cost - } - ) - ); - assert_eq!( - 1, - forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( - &transaction_cost, - &UpdatedCosts { - updated_block_cost: 0, - updated_costliest_account_cost: test_cost + 1 - } - ) - ); - } - - // by block limit, it can be put into batch #1; - // by vote limit, it can be put into batch #2; - // by account limit, it can be put into batch #3; - // it should be put into batch #3 to satisfy all batch limits. - { - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - forward_packet_batches_by_accounts.batch_block_limit = test_cost + 1; - forward_packet_batches_by_accounts.batch_vote_limit = test_cost / 2 + 1; - forward_packet_batches_by_accounts.batch_account_limit = test_cost / 3 + 1; - - let transaction_cost = zero_transaction_cost(); - assert_eq!( - 2, - forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( - &transaction_cost, - &UpdatedCosts { - updated_block_cost: test_cost, - updated_costliest_account_cost: test_cost - } - ) - ); - } - } -} diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs deleted file mode 100644 index d48c1556fa206e..00000000000000 --- a/core/src/banking_stage/forwarder.rs +++ /dev/null @@ -1,604 +0,0 @@ -use { - super::{ - forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, - leader_slot_metrics::LeaderSlotMetricsTracker, - unprocessed_transaction_storage::UnprocessedTransactionStorage, BankingStageStats, - ForwardOption, - }, - crate::{ - banking_stage::{ - immutable_deserialized_packet::ImmutableDeserializedPacket, LikeClusterInfo, - }, - next_leader::{next_leader, next_leader_tpu_vote}, - tracer_packet_stats::TracerPacketStats, - }, - solana_client::connection_cache::ConnectionCache, - solana_connection_cache::client_connection::ClientConnection as TpuConnection, - solana_feature_set::FeatureSet, - solana_measure::measure_us, - solana_perf::{data_budget::DataBudget, packet::Packet}, - solana_poh::poh_recorder::PohRecorder, - solana_runtime::bank_forks::BankForks, - solana_sdk::{pubkey::Pubkey, transaction::SanitizedTransaction, transport::TransportError}, - solana_streamer::sendmmsg::batch_send, - std::{ - iter::repeat, - net::{SocketAddr, UdpSocket}, - sync::{atomic::Ordering, Arc, RwLock}, - }, -}; - -pub struct Forwarder { - 
poh_recorder: Arc>, - bank_forks: Arc>, - socket: UdpSocket, - cluster_info: T, - connection_cache: Arc, - data_budget: Arc, - forward_packet_batches_by_accounts: ForwardPacketBatchesByAccounts, -} - -impl Forwarder { - pub fn new( - poh_recorder: Arc>, - bank_forks: Arc>, - cluster_info: T, - connection_cache: Arc, - data_budget: Arc, - ) -> Self { - Self { - poh_recorder, - bank_forks, - socket: UdpSocket::bind("0.0.0.0:0").unwrap(), - cluster_info, - connection_cache, - data_budget, - forward_packet_batches_by_accounts: - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(), - } - } - - pub fn clear_batches(&mut self) { - self.forward_packet_batches_by_accounts.reset(); - } - - pub fn try_add_packet( - &mut self, - sanitized_transaction: &SanitizedTransaction, - immutable_packet: Arc, - feature_set: &FeatureSet, - ) -> bool { - self.forward_packet_batches_by_accounts.try_add_packet( - sanitized_transaction, - immutable_packet, - feature_set, - ) - } - - pub fn forward_batched_packets(&self, forward_option: &ForwardOption) { - self.forward_packet_batches_by_accounts - .iter_batches() - .filter(|&batch| !batch.is_empty()) - .for_each(|forwardable_batch| { - let _ = self - .forward_packets(forward_option, forwardable_batch.get_forwardable_packets()); - }); - } - - /// This function is exclusively used by multi-iterator banking threads to handle forwarding - /// logic per thread. Central scheduler Controller uses try_add_packet() ... forward_batched_packets() - /// to handle forwarding slight differently. - pub fn handle_forwarding( - &mut self, - unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, - hold: bool, - slot_metrics_tracker: &mut LeaderSlotMetricsTracker, - banking_stage_stats: &BankingStageStats, - tracer_packet_stats: &mut TracerPacketStats, - ) { - let forward_option = unprocessed_transaction_storage.forward_option(); - - // get current working bank from bank_forks, use it to sanitize transaction and - // load all accounts from address loader; - let current_bank = self.bank_forks.read().unwrap().working_bank(); - - // if we have crossed an epoch boundary, recache any state - unprocessed_transaction_storage.cache_epoch_boundary_info(¤t_bank); - - // sanitize and filter packets that are no longer valid (could be too old, a duplicate of something - // already processed), then add to forwarding buffer. 
- let filter_forwarding_result = unprocessed_transaction_storage - .filter_forwardable_packets_and_add_batches( - current_bank, - &mut self.forward_packet_batches_by_accounts, - ); - slot_metrics_tracker.increment_transactions_from_packets_us( - filter_forwarding_result.total_packet_conversion_us, - ); - banking_stage_stats.packet_conversion_elapsed.fetch_add( - filter_forwarding_result.total_packet_conversion_us, - Ordering::Relaxed, - ); - banking_stage_stats - .filter_pending_packets_elapsed - .fetch_add( - filter_forwarding_result.total_filter_packets_us, - Ordering::Relaxed, - ); - banking_stage_stats.dropped_forward_packets_count.fetch_add( - filter_forwarding_result.total_dropped_packets, - Ordering::Relaxed, - ); - - self.forward_packet_batches_by_accounts - .iter_batches() - .filter(|&batch| !batch.is_empty()) - .for_each(|forward_batch| { - slot_metrics_tracker.increment_forwardable_batches_count(1); - - let batched_forwardable_packets_count = forward_batch.len(); - let (_forward_result, successful_forwarded_packets_count, leader_pubkey) = self - .forward_buffered_packets( - &forward_option, - forward_batch.get_forwardable_packets(), - banking_stage_stats, - ); - - if let Some(leader_pubkey) = leader_pubkey { - tracer_packet_stats.increment_total_forwardable_tracer_packets( - filter_forwarding_result.total_forwardable_tracer_packets, - leader_pubkey, - ); - } - let failed_forwarded_packets_count = batched_forwardable_packets_count - .saturating_sub(successful_forwarded_packets_count); - - if failed_forwarded_packets_count > 0 { - slot_metrics_tracker.increment_failed_forwarded_packets_count( - failed_forwarded_packets_count as u64, - ); - slot_metrics_tracker.increment_packet_batch_forward_failure_count(1); - } - - if successful_forwarded_packets_count > 0 { - slot_metrics_tracker.increment_successful_forwarded_packets_count( - successful_forwarded_packets_count as u64, - ); - } - }); - self.clear_batches(); - - if !hold { - slot_metrics_tracker.increment_cleared_from_buffer_after_forward_count( - filter_forwarding_result.total_forwardable_packets as u64, - ); - tracer_packet_stats.increment_total_cleared_from_buffer_after_forward( - filter_forwarding_result.total_tracer_packets_in_buffer, - ); - unprocessed_transaction_storage.clear_forwarded_packets(); - } - } - - /// Forwards all valid, unprocessed packets in the iterator, up to a rate limit. - /// Returns whether forwarding succeeded, the number of attempted forwarded packets - /// if any, the time spent forwarding in us, and the leader pubkey if any. - pub fn forward_packets<'a>( - &self, - forward_option: &ForwardOption, - forwardable_packets: impl Iterator, - ) -> ( - std::result::Result<(), TransportError>, - usize, - u64, - Option, - ) { - let Some((leader_pubkey, addr)) = self.get_leader_and_addr(forward_option) else { - return (Ok(()), 0, 0, None); - }; - - self.update_data_budget(); - let packet_vec: Vec<_> = forwardable_packets - .filter(|p| !p.meta().forwarded()) - .filter(|p| p.meta().is_from_staked_node()) - .filter(|p| self.data_budget.take(p.meta().size)) - .filter_map(|p| p.data(..).map(|data| data.to_vec())) - .collect(); - - let packet_vec_len = packet_vec.len(); - // TODO: see https://github.com/solana-labs/solana/issues/23819 - // fix this so returns the correct number of succeeded packets - // when there's an error sending the batch. 
This was left as-is for now - // in favor of shipping Quic support, which was considered higher-priority - let (res, forward_us) = if !packet_vec.is_empty() { - measure_us!(self.forward(forward_option, packet_vec, &addr)) - } else { - (Ok(()), 0) - }; - - (res, packet_vec_len, forward_us, Some(leader_pubkey)) - } - - /// Forwards all valid, unprocessed packets in the buffer, up to a rate limit. Returns - /// the number of successfully forwarded packets in second part of tuple - fn forward_buffered_packets<'a>( - &self, - forward_option: &ForwardOption, - forwardable_packets: impl Iterator, - banking_stage_stats: &BankingStageStats, - ) -> ( - std::result::Result<(), TransportError>, - usize, - Option, - ) { - let (res, num_packets, _forward_us, leader_pubkey) = - self.forward_packets(forward_option, forwardable_packets); - if let Err(ref err) = res { - warn!("failed to forward packets: {err}"); - } - - if num_packets > 0 { - if let ForwardOption::ForwardTpuVote = forward_option { - banking_stage_stats - .forwarded_vote_count - .fetch_add(num_packets, Ordering::Relaxed); - } else { - banking_stage_stats - .forwarded_transaction_count - .fetch_add(num_packets, Ordering::Relaxed); - } - } - - (res, num_packets, leader_pubkey) - } - - /// Get the pubkey and socket address for the leader to forward to - fn get_leader_and_addr(&self, forward_option: &ForwardOption) -> Option<(Pubkey, SocketAddr)> { - match forward_option { - ForwardOption::NotForward => None, - ForwardOption::ForwardTransaction => { - next_leader(&self.cluster_info, &self.poh_recorder, |node| { - node.tpu_forwards(self.connection_cache.protocol()) - }) - } - ForwardOption::ForwardTpuVote => { - next_leader_tpu_vote(&self.cluster_info, &self.poh_recorder) - } - } - } - - /// Re-fill the data budget if enough time has passed - fn update_data_budget(&self) { - const INTERVAL_MS: u64 = 100; - // 12 MB outbound limit per second - const MAX_BYTES_PER_SECOND: usize = 12_000_000; - const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000; - const MAX_BYTES_BUDGET: usize = MAX_BYTES_PER_INTERVAL * 5; - self.data_budget.update(INTERVAL_MS, |bytes| { - std::cmp::min( - bytes.saturating_add(MAX_BYTES_PER_INTERVAL), - MAX_BYTES_BUDGET, - ) - }); - } - - fn forward( - &self, - forward_option: &ForwardOption, - packet_vec: Vec>, - addr: &SocketAddr, - ) -> Result<(), TransportError> { - match forward_option { - ForwardOption::ForwardTpuVote => { - // The vote must be forwarded using only UDP. 
- let pkts: Vec<_> = packet_vec.into_iter().zip(repeat(*addr)).collect(); - batch_send(&self.socket, &pkts).map_err(|err| err.into()) - } - ForwardOption::ForwardTransaction => { - let conn = self.connection_cache.get_connection(addr); - conn.send_data_batch_async(packet_vec) - } - ForwardOption::NotForward => panic!("should not forward"), - } - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - crate::banking_stage::{ - tests::{create_slow_genesis_config_with_leader, new_test_cluster_info}, - unprocessed_packet_batches::{DeserializedPacket, UnprocessedPacketBatches}, - unprocessed_transaction_storage::ThreadType, - }, - solana_client::rpc_client::SerializableTransaction, - solana_gossip::cluster_info::{ClusterInfo, Node}, - solana_ledger::{blockstore::Blockstore, genesis_utils::GenesisConfigInfo}, - solana_perf::packet::PacketFlags, - solana_poh::{poh_recorder::create_test_recorder, poh_service::PohService}, - solana_runtime::bank::Bank, - solana_sdk::{ - hash::Hash, poh_config::PohConfig, signature::Keypair, signer::Signer, - system_transaction, transaction::VersionedTransaction, - }, - solana_streamer::{ - nonblocking::testing_utilities::{ - setup_quic_server_with_sockets, SpawnTestServerResult, TestServerConfig, - }, - quic::rt, - }, - std::{ - sync::atomic::AtomicBool, - time::{Duration, Instant}, - }, - tempfile::TempDir, - tokio::time::sleep, - }; - - struct TestSetup { - _ledger_dir: TempDir, - blockhash: Hash, - rent_min_balance: u64, - - bank_forks: Arc>, - poh_recorder: Arc>, - exit: Arc, - poh_service: PohService, - cluster_info: Arc, - local_node: Node, - } - - fn setup() -> TestSetup { - let validator_keypair = Arc::new(Keypair::new()); - let genesis_config_info = - create_slow_genesis_config_with_leader(10_000, &validator_keypair.pubkey()); - let GenesisConfigInfo { genesis_config, .. 
} = &genesis_config_info; - - let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(genesis_config); - - let ledger_path = TempDir::new().unwrap(); - let blockstore = Arc::new( - Blockstore::open(ledger_path.as_ref()) - .expect("Expected to be able to open database ledger"), - ); - let poh_config = PohConfig { - // limit tick count to avoid clearing working_bank at - // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage - target_tick_count: Some(bank.max_tick_height() - 1), - ..PohConfig::default() - }; - - let (exit, poh_recorder, poh_service, _entry_receiver) = - create_test_recorder(bank, blockstore, Some(poh_config), None); - - let (local_node, cluster_info) = new_test_cluster_info(Some(validator_keypair)); - let cluster_info = Arc::new(cluster_info); - - TestSetup { - _ledger_dir: ledger_path, - blockhash: genesis_config.hash(), - rent_min_balance: genesis_config.rent.minimum_balance(0), - - bank_forks, - poh_recorder, - exit, - poh_service, - cluster_info, - local_node, - } - } - - async fn check_all_received( - socket: UdpSocket, - expected_num_packets: usize, - expected_packet_size: usize, - expected_blockhash: &Hash, - ) { - let SpawnTestServerResult { - join_handle, - exit, - receiver, - server_address: _, - stats: _, - } = setup_quic_server_with_sockets(vec![socket], None, TestServerConfig::default()); - - let now = Instant::now(); - let mut total_packets = 0; - while now.elapsed().as_secs() < 5 { - if let Ok(packets) = receiver.try_recv() { - total_packets += packets.len(); - for packet in packets.iter() { - assert_eq!(packet.meta().size, expected_packet_size); - let tx: VersionedTransaction = packet.deserialize_slice(..).unwrap(); - assert_eq!( - tx.get_recent_blockhash(), - expected_blockhash, - "Unexpected blockhash, tx: {tx:?}, expected blockhash: {expected_blockhash}." - ); - } - } else { - sleep(Duration::from_millis(100)).await; - } - if total_packets >= expected_num_packets { - break; - } - } - assert_eq!(total_packets, expected_num_packets); - - exit.store(true, Ordering::Relaxed); - join_handle.await.unwrap(); - } - - #[test] - fn test_forwarder_budget() { - let TestSetup { - blockhash, - rent_min_balance, - bank_forks, - poh_recorder, - exit, - poh_service, - cluster_info, - local_node, - .. 
- } = setup(); - - // Create `PacketBatch` with 1 unprocessed packet - let tx = system_transaction::transfer( - &Keypair::new(), - &solana_sdk::pubkey::new_rand(), - rent_min_balance, - blockhash, - ); - let mut packet = Packet::from_data(None, tx).unwrap(); - // unstaked transactions will not be forwarded - packet.meta_mut().set_from_staked_node(true); - let expected_packet_size = packet.meta().size; - let deserialized_packet = DeserializedPacket::new(packet).unwrap(); - - let test_cases = vec![ - ("budget-restricted", DataBudget::restricted(), 0), - ("budget-available", DataBudget::default(), 1), - ]; - let runtime = rt("solQuicTestRt".to_string()); - for (_name, data_budget, expected_num_forwarded) in test_cases { - let mut forwarder = Forwarder::new( - poh_recorder.clone(), - bank_forks.clone(), - cluster_info.clone(), - Arc::new(ConnectionCache::new("connection_cache_test")), - Arc::new(data_budget), - ); - let unprocessed_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter( - vec![deserialized_packet.clone()].into_iter(), - 1, - ); - let stats = BankingStageStats::default(); - forwarder.handle_forwarding( - &mut UnprocessedTransactionStorage::new_transaction_storage( - unprocessed_packet_batches, - ThreadType::Transactions, - ), - true, - &mut LeaderSlotMetricsTracker::new(0), - &stats, - &mut TracerPacketStats::new(0), - ); - - let recv_socket = &local_node.sockets.tpu_forwards_quic[0]; - runtime.block_on(check_all_received( - (*recv_socket).try_clone().unwrap(), - expected_num_forwarded, - expected_packet_size, - &blockhash, - )); - } - - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); - } - - #[test] - fn test_handle_forwarding() { - let TestSetup { - blockhash, - rent_min_balance, - bank_forks, - poh_recorder, - exit, - poh_service, - cluster_info, - local_node, - .. - } = setup(); - - let keypair = Keypair::new(); - let pubkey = solana_sdk::pubkey::new_rand(); - - // forwarded packets will not be forwarded again - let forwarded_packet = { - let transaction = - system_transaction::transfer(&keypair, &pubkey, rent_min_balance, blockhash); - let mut packet = Packet::from_data(None, transaction).unwrap(); - packet.meta_mut().flags |= PacketFlags::FORWARDED; - DeserializedPacket::new(packet).unwrap() - }; - // packets from unstaked nodes will not be forwarded - let unstaked_packet = { - let transaction = - system_transaction::transfer(&keypair, &pubkey, rent_min_balance, blockhash); - let packet = Packet::from_data(None, transaction).unwrap(); - DeserializedPacket::new(packet).unwrap() - }; - // packets with incorrect blockhash will be filtered out - let incorrect_blockhash_packet = { - let transaction = - system_transaction::transfer(&keypair, &pubkey, rent_min_balance, Hash::default()); - let packet = Packet::from_data(None, transaction).unwrap(); - DeserializedPacket::new(packet).unwrap() - }; - - // maybe also add packet without stake and packet with incorrect blockhash? 
- let (expected_packet_size, normal_packet) = { - let transaction = system_transaction::transfer(&keypair, &pubkey, 1, blockhash); - let mut packet = Packet::from_data(None, transaction).unwrap(); - packet.meta_mut().set_from_staked_node(true); - (packet.meta().size, DeserializedPacket::new(packet).unwrap()) - }; - - let mut unprocessed_packet_batches = UnprocessedTransactionStorage::new_transaction_storage( - UnprocessedPacketBatches::from_iter( - vec![ - forwarded_packet, - unstaked_packet, - incorrect_blockhash_packet, - normal_packet, - ], - 4, - ), - ThreadType::Transactions, - ); - let connection_cache = ConnectionCache::new("connection_cache_test"); - - let test_cases = vec![ - ("fwd-normal", true, 2, 1), - ("fwd-no-op", true, 2, 0), - ("fwd-no-hold", false, 0, 0), - ]; - - let mut forwarder = Forwarder::new( - poh_recorder, - bank_forks, - cluster_info, - Arc::new(connection_cache), - Arc::new(DataBudget::default()), - ); - let runtime = rt("solQuicTestRt".to_string()); - for (name, hold, expected_num_unprocessed, expected_num_processed) in test_cases { - let stats = BankingStageStats::default(); - forwarder.handle_forwarding( - &mut unprocessed_packet_batches, - hold, - &mut LeaderSlotMetricsTracker::new(0), - &stats, - &mut TracerPacketStats::new(0), - ); - - let recv_socket = &local_node.sockets.tpu_forwards_quic[0]; - - runtime.block_on(check_all_received( - (*recv_socket).try_clone().unwrap(), - expected_num_processed, - expected_packet_size, - &blockhash, - )); - - let num_unprocessed_packets: usize = unprocessed_packet_batches.len(); - assert_eq!(num_unprocessed_packets, expected_num_unprocessed, "{name}"); - } - - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); - } -} diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index ae13c37caa5a0e..63b2fbf23dc65f 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -1,8 +1,5 @@ use { - super::{ - forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, - immutable_deserialized_packet::{DeserializedPacketError, ImmutableDeserializedPacket}, - }, + super::immutable_deserialized_packet::{DeserializedPacketError, ImmutableDeserializedPacket}, itertools::Itertools, rand::{thread_rng, Rng}, solana_perf::packet::Packet, @@ -399,57 +396,6 @@ impl LatestUnprocessedVotes { ); } - /// Returns how many packets were forwardable - /// Performs a weighted random order based on stake and stops forwarding at the first error - /// Votes from validators with 0 stakes are ignored - pub fn get_and_insert_forwardable_packets( - &self, - bank: Arc, - forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, - ) -> usize { - let pubkeys_by_stake = self.weighted_random_order_by_stake(); - let mut forwarded_count: usize = 0; - for pubkey in pubkeys_by_stake { - let Some(vote) = self.get_entry(pubkey) else { - continue; - }; - - let mut vote = vote.write().unwrap(); - if vote.is_vote_taken() || vote.is_forwarded() { - continue; - } - - let deserialized_vote_packet = vote.vote.as_ref().unwrap().clone(); - let Some((sanitized_vote_transaction, _deactivation_slot)) = deserialized_vote_packet - .build_sanitized_transaction( - bank.vote_only_bank(), - bank.as_ref(), - bank.get_reserved_account_keys(), - ) - else { - continue; - }; - - let forwarding_successful = forward_packet_batches_by_accounts.try_add_packet( - &sanitized_vote_transaction, - deserialized_vote_packet, - 
&bank.feature_set, - ); - - if !forwarding_successful { - // To match behavior of regular transactions we stop forwarding votes as soon as one - // fails. We are assuming that failure (try_add_packet) means no more space - // available. - break; - } - - vote.forwarded = true; - forwarded_count += 1; - } - - forwarded_count - } - /// Drains all votes yet to be processed sorted by a weighted random ordering by stake /// Do not touch votes that are for a different fork from `bank` as we know they will fail, /// however the next bank could be built on a different fork and consume these votes. @@ -495,22 +441,6 @@ impl LatestUnprocessedVotes { .unwrap_or(false) } - /// Sometimes we forward and hold the packets, sometimes we forward and clear. - /// This also clears all gossip votes since by definition they have been forwarded - pub fn clear_forwarded_packets(&self) { - self.latest_vote_per_vote_pubkey - .read() - .unwrap() - .values() - .filter(|lock| lock.read().unwrap().is_forwarded()) - .for_each(|lock| { - let mut vote = lock.write().unwrap(); - if vote.is_forwarded() && vote.take_vote().is_some() { - self.num_unprocessed_votes.fetch_sub(1, Ordering::Relaxed); - } - }); - } - pub(super) fn should_deprecate_legacy_vote_ixs(&self) -> bool { self.deprecate_legacy_vote_ixs.load(Ordering::Relaxed) } @@ -525,7 +455,6 @@ mod tests { solana_perf::packet::{Packet, PacketBatch, PacketFlags}, solana_runtime::{ bank::Bank, - epoch_stakes::EpochStakes, genesis_utils::{self, ValidatorVoteKeypairs}, }, solana_sdk::{ @@ -966,171 +895,6 @@ mod tests { tpu.join().unwrap(); } - #[test] - fn test_forwardable_packets() { - let latest_unprocessed_votes = LatestUnprocessedVotes::new_for_tests(&[]); - let bank_0 = Bank::new_for_tests(&GenesisConfig::default()); - let mut bank = Bank::new_from_parent( - Arc::new(bank_0), - &Pubkey::new_unique(), - MINIMUM_SLOTS_PER_EPOCH, - ); - assert_eq!(bank.epoch(), 1); - bank.set_epoch_stakes_for_test( - bank.epoch().saturating_add(2), - EpochStakes::new_for_tests(HashMap::new(), bank.epoch().saturating_add(2)), - ); - let bank = Arc::new(bank); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - - let keypair_a = ValidatorVoteKeypairs::new_rand(); - let keypair_b = ValidatorVoteKeypairs::new_rand(); - - let vote_a = from_slots(vec![(1, 1)], VoteSource::Gossip, &keypair_a, None); - let vote_b = from_slots(vec![(2, 1)], VoteSource::Tpu, &keypair_b, None); - latest_unprocessed_votes - .update_latest_vote(vote_a.clone(), false /* should replenish */); - latest_unprocessed_votes - .update_latest_vote(vote_b.clone(), false /* should replenish */); - - // Recache on epoch boundary and don't forward 0 stake accounts - latest_unprocessed_votes.cache_epoch_boundary_info(&bank); - let forwarded = latest_unprocessed_votes - .get_and_insert_forwardable_packets(bank, &mut forward_packet_batches_by_accounts); - assert_eq!(0, forwarded); - assert_eq!( - 0, - forward_packet_batches_by_accounts - .iter_batches() - .filter(|&batch| !batch.is_empty()) - .count() - ); - - let config = - genesis_utils::create_genesis_config_with_vote_accounts(100, &[keypair_a], vec![200]) - .genesis_config; - let bank_0 = Bank::new_for_tests(&config); - let bank = Bank::new_from_parent( - Arc::new(bank_0), - &Pubkey::new_unique(), - 2 * MINIMUM_SLOTS_PER_EPOCH, - ); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - - // Don't forward votes from gossip - 
latest_unprocessed_votes.cache_epoch_boundary_info(&bank); - latest_unprocessed_votes - .update_latest_vote(vote_a.clone(), false /* should replenish */); - latest_unprocessed_votes - .update_latest_vote(vote_b.clone(), false /* should replenish */); - let forwarded = latest_unprocessed_votes.get_and_insert_forwardable_packets( - Arc::new(bank), - &mut forward_packet_batches_by_accounts, - ); - - assert_eq!(0, forwarded); - assert_eq!( - 0, - forward_packet_batches_by_accounts - .iter_batches() - .filter(|&batch| !batch.is_empty()) - .count() - ); - - let config = - genesis_utils::create_genesis_config_with_vote_accounts(100, &[keypair_b], vec![200]) - .genesis_config; - let bank_0 = Bank::new_for_tests(&config); - let bank = Arc::new(Bank::new_from_parent( - Arc::new(bank_0), - &Pubkey::new_unique(), - 3 * MINIMUM_SLOTS_PER_EPOCH, - )); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - - // Forward from TPU - latest_unprocessed_votes.cache_epoch_boundary_info(&bank); - latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); - latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); - let forwarded = latest_unprocessed_votes.get_and_insert_forwardable_packets( - bank.clone(), - &mut forward_packet_batches_by_accounts, - ); - - assert_eq!(1, forwarded); - assert_eq!( - 1, - forward_packet_batches_by_accounts - .iter_batches() - .filter(|&batch| !batch.is_empty()) - .count() - ); - - // Don't forward again - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - let forwarded = latest_unprocessed_votes - .get_and_insert_forwardable_packets(bank, &mut forward_packet_batches_by_accounts); - - assert_eq!(0, forwarded); - assert_eq!( - 0, - forward_packet_batches_by_accounts - .iter_batches() - .filter(|&batch| !batch.is_empty()) - .count() - ); - } - - #[test] - fn test_clear_forwarded_packets() { - let keypair_a = ValidatorVoteKeypairs::new_rand(); - let keypair_b = ValidatorVoteKeypairs::new_rand(); - let keypair_c = ValidatorVoteKeypairs::new_rand(); - let keypair_d = ValidatorVoteKeypairs::new_rand(); - let latest_unprocessed_votes = LatestUnprocessedVotes::new_for_tests(&[ - keypair_a.vote_keypair.pubkey(), - keypair_b.vote_keypair.pubkey(), - keypair_c.vote_keypair.pubkey(), - keypair_d.vote_keypair.pubkey(), - ]); - - let vote_a = from_slots(vec![(1, 1)], VoteSource::Gossip, &keypair_a, None); - let mut vote_b = from_slots(vec![(2, 1)], VoteSource::Tpu, &keypair_b, None); - vote_b.forwarded = true; - let vote_c = from_slots(vec![(3, 1)], VoteSource::Tpu, &keypair_c, None); - let vote_d = from_slots(vec![(4, 1)], VoteSource::Gossip, &keypair_d, None); - - latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); - latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); - latest_unprocessed_votes.update_latest_vote(vote_c, false /* should replenish */); - latest_unprocessed_votes.update_latest_vote(vote_d, false /* should replenish */); - assert_eq!(4, latest_unprocessed_votes.len()); - - latest_unprocessed_votes.clear_forwarded_packets(); - assert_eq!(1, latest_unprocessed_votes.len()); - - assert_eq!( - Some(1), - latest_unprocessed_votes.get_latest_vote_slot(keypair_a.vote_keypair.pubkey()) - ); - assert_eq!( - Some(2), - latest_unprocessed_votes.get_latest_vote_slot(keypair_b.vote_keypair.pubkey()) - ); - assert_eq!( - Some(3), - 
latest_unprocessed_votes.get_latest_vote_slot(keypair_c.vote_keypair.pubkey()) - ); - assert_eq!( - Some(4), - latest_unprocessed_votes.get_latest_vote_slot(keypair_d.vote_keypair.pubkey()) - ); - } - #[test] fn test_insert_batch_unstaked() { let keypair_a = ValidatorVoteKeypairs::new_rand(); diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index ce232a370b2a5f..bc796b6689b912 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -221,23 +221,6 @@ struct LeaderSlotPacketCountMetrics { // already counted in `self.retrayble_errored_transaction_count`. cost_model_throttled_transactions_count: u64, - // total number of forwardsable packets that failed forwarding - failed_forwarded_packets_count: u64, - - // total number of forwardsable packets that were successfully forwarded - successful_forwarded_packets_count: u64, - - // total number of attempted forwards that failed. Note this is not a count of the number of packets - // that failed, just the total number of batches of packets that failed forwarding - packet_batch_forward_failure_count: u64, - - // total number of valid unprocessed packets in the buffer that were removed after being forwarded - cleared_from_buffer_after_forward_count: u64, - - // total number of forwardable batches that were attempted for forwarding. A forwardable batch - // is defined in `ForwardPacketBatchesByAccounts` in `forward_packet_batches_by_accounts.rs` - forwardable_batches_count: u64, - // min prioritization fees for scheduled transactions min_prioritization_fees: u64, // max prioritization fees for scheduled transactions @@ -354,31 +337,6 @@ impl LeaderSlotPacketCountMetrics { self.cost_model_throttled_transactions_count, i64 ), - ( - "failed_forwarded_packets_count", - self.failed_forwarded_packets_count, - i64 - ), - ( - "successful_forwarded_packets_count", - self.successful_forwarded_packets_count, - i64 - ), - ( - "packet_batch_forward_failure_count", - self.packet_batch_forward_failure_count, - i64 - ), - ( - "cleared_from_buffer_after_forward_count", - self.cleared_from_buffer_after_forward_count, - i64 - ), - ( - "forwardable_batches_count", - self.forwardable_batches_count, - i64 - ), ( "end_of_slot_unprocessed_buffer_len", self.end_of_slot_unprocessed_buffer_len, @@ -886,61 +844,6 @@ impl LeaderSlotMetricsTracker { } } - pub(crate) fn increment_failed_forwarded_packets_count(&mut self, count: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .packet_count_metrics - .failed_forwarded_packets_count, - count - ); - } - } - - pub(crate) fn increment_successful_forwarded_packets_count(&mut self, count: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .packet_count_metrics - .successful_forwarded_packets_count, - count - ); - } - } - - pub(crate) fn increment_packet_batch_forward_failure_count(&mut self, count: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .packet_count_metrics - .packet_batch_forward_failure_count, - count - ); - } - } - - pub(crate) fn increment_cleared_from_buffer_after_forward_count(&mut self, count: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .packet_count_metrics - .cleared_from_buffer_after_forward_count, - 
count - ); - } - } - - pub(crate) fn increment_forwardable_batches_count(&mut self, count: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .packet_count_metrics - .forwardable_batches_count, - count - ); - } - } - pub(crate) fn increment_retryable_packets_count(&mut self, count: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { saturating_add_assign!( @@ -1017,30 +920,6 @@ impl LeaderSlotMetricsTracker { } } - pub(crate) fn increment_forward_us(&mut self, us: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_buffered_packets_timings - .forward_us, - us - ); - } - } - - pub(crate) fn increment_forward_and_hold_us(&mut self, us: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_buffered_packets_timings - .forward_and_hold_us, - us - ); - } - } - pub(crate) fn increment_process_packets_transactions_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { saturating_add_assign!( diff --git a/core/src/banking_stage/leader_slot_timing_metrics.rs b/core/src/banking_stage/leader_slot_timing_metrics.rs index 31b3dc0a24e7ca..97851fddcb92df 100644 --- a/core/src/banking_stage/leader_slot_timing_metrics.rs +++ b/core/src/banking_stage/leader_slot_timing_metrics.rs @@ -184,8 +184,6 @@ impl OuterLoopTimings { pub(crate) struct ProcessBufferedPacketsTimings { pub make_decision_us: u64, pub consume_buffered_packets_us: u64, - pub forward_us: u64, - pub forward_and_hold_us: u64, } impl ProcessBufferedPacketsTimings { fn report(&self, id: &str, slot: Slot) { @@ -199,8 +197,6 @@ impl ProcessBufferedPacketsTimings { self.consume_buffered_packets_us as i64, i64 ), - ("forward_us", self.forward_us as i64, i64), - ("forward_and_hold_us", self.forward_and_hold_us as i64, i64), ); } } diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index 9f6fcc8388a364..2ce23faac8550d 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -591,18 +591,15 @@ fn try_schedule_transaction( mod tests { use { super::*, - crate::banking_stage::{ - consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, - immutable_deserialized_packet::ImmutableDeserializedPacket, - }, + crate::banking_stage::consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, solana_sdk::{ clock::Slot, compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, - packet::Packet, pubkey::Pubkey, signature::Keypair, signer::Signer, system_instruction, + pubkey::Pubkey, signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, }, - std::{borrow::Borrow, sync::Arc}, + std::borrow::Borrow, }; macro_rules! 
txid { @@ -677,12 +674,6 @@ mod tests { lamports, compute_unit_price, ); - let packet = Arc::new( - ImmutableDeserializedPacket::new( - Packet::from_data(None, transaction.to_versioned_transaction()).unwrap(), - ) - .unwrap(), - ); let transaction_ttl = SanitizedTransactionTTL { transaction, max_age: MaxAge { @@ -694,7 +685,6 @@ mod tests { container.insert_new_transaction( id, transaction_ttl, - packet, compute_unit_price, TEST_TRANSACTION_COST, ); diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index ddec4ec90711c8..d632f3043a971b 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -16,11 +16,10 @@ use { consume_worker::ConsumeWorkerMetrics, consumer::Consumer, decision_maker::{BufferedPacketsDecision, DecisionMaker}, - forwarder::Forwarder, immutable_deserialized_packet::ImmutableDeserializedPacket, packet_deserializer::PacketDeserializer, scheduler_messages::MaxAge, - ForwardOption, LikeClusterInfo, TOTAL_BUFFERED_PACKETS, + TOTAL_BUFFERED_PACKETS, }, arrayvec::ArrayVec, crossbeam_channel::RecvTimeoutError, @@ -32,7 +31,7 @@ use { solana_sdk::{ self, address_lookup_table::state::estimate_last_valid_slot, - clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, + clock::{Slot, MAX_PROCESSING_AGE}, fee::FeeBudgetLimits, saturating_add_assign, transaction::SanitizedTransaction, @@ -41,12 +40,12 @@ use { solana_svm_transaction::svm_message::SVMMessage, std::{ sync::{Arc, RwLock}, - time::{Duration, Instant}, + time::Duration, }, }; /// Controls packet and transaction flow into scheduler, and scheduling execution. -pub(crate) struct SchedulerController { +pub(crate) struct SchedulerController { /// Decision maker for determining what should be done with transactions. decision_maker: DecisionMaker, /// Packet/Transaction ingress. @@ -69,18 +68,15 @@ pub(crate) struct SchedulerController { timing_metrics: SchedulerTimingMetrics, /// Metric report handles for the worker threads. worker_metrics: Vec>, - /// State for forwarding packets to the leader, if enabled. 
- forwarder: Option>, } -impl SchedulerController { +impl SchedulerController { pub fn new( decision_maker: DecisionMaker, packet_deserializer: PacketDeserializer, bank_forks: Arc>, scheduler: PrioGraphScheduler, worker_metrics: Vec>, - forwarder: Option>, ) -> Self { Self { decision_maker, @@ -93,7 +89,6 @@ impl SchedulerController { count_metrics: SchedulerCountMetrics::default(), timing_metrics: SchedulerTimingMetrics::default(), worker_metrics, - forwarder, } } @@ -151,7 +146,6 @@ impl SchedulerController { &mut self, decision: &BufferedPacketsDecision, ) -> Result<(), SchedulerError> { - let forwarding_enabled = self.forwarder.is_some(); match decision { BufferedPacketsDecision::Consume(bank_start) => { let (scheduling_summary, schedule_time_us) = measure_us!(self.scheduler.schedule( @@ -191,30 +185,16 @@ impl SchedulerController { }); } BufferedPacketsDecision::Forward => { - if forwarding_enabled { - let (_, forward_time_us) = measure_us!(self.forward_packets(false)); - self.timing_metrics.update(|timing_metrics| { - saturating_add_assign!(timing_metrics.forward_time_us, forward_time_us); - }); - } else { - let (_, clear_time_us) = measure_us!(self.clear_container()); - self.timing_metrics.update(|timing_metrics| { - saturating_add_assign!(timing_metrics.clear_time_us, clear_time_us); - }); - } + let (_, clear_time_us) = measure_us!(self.clear_container()); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.clear_time_us, clear_time_us); + }); } BufferedPacketsDecision::ForwardAndHold => { - if forwarding_enabled { - let (_, forward_time_us) = measure_us!(self.forward_packets(true)); - self.timing_metrics.update(|timing_metrics| { - saturating_add_assign!(timing_metrics.forward_time_us, forward_time_us); - }); - } else { - let (_, clean_time_us) = measure_us!(self.clean_queue()); - self.timing_metrics.update(|timing_metrics| { - saturating_add_assign!(timing_metrics.clean_time_us, clean_time_us); - }); - } + let (_, clean_time_us) = measure_us!(self.clean_queue()); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.clean_time_us, clean_time_us); + }); } BufferedPacketsDecision::Hold => {} } @@ -247,106 +227,6 @@ impl SchedulerController { } } - /// Forward packets to the next leader. - fn forward_packets(&mut self, hold: bool) { - const MAX_FORWARDING_DURATION: Duration = Duration::from_millis(100); - let start = Instant::now(); - let bank = self.bank_forks.read().unwrap().working_bank(); - let feature_set = &bank.feature_set; - let forwarder = self.forwarder.as_mut().expect("forwarder must exist"); - - // Pop from the container in chunks, filter using bank checks, then attempt to forward. - // This doubles as a way to clean the queue as well as forwarding transactions. - const CHUNK_SIZE: usize = 64; - let mut num_forwarded: usize = 0; - let mut ids_to_add_back = Vec::new(); - let mut max_time_reached = false; - while !self.container.is_empty() { - let mut filter_array = [true; CHUNK_SIZE]; - let mut ids = Vec::with_capacity(CHUNK_SIZE); - let mut txs = Vec::with_capacity(CHUNK_SIZE); - - for _ in 0..CHUNK_SIZE { - if let Some(id) = self.container.pop() { - ids.push(id); - } else { - break; - } - } - let chunk_size = ids.len(); - ids.iter().for_each(|id| { - let transaction = self.container.get_transaction_ttl(&id.id).unwrap(); - txs.push(&transaction.transaction); - }); - - // use same filter we use for processing transactions: - // age, already processed, fee-check. 
- Self::pre_graph_filter( - &txs, - &mut filter_array, - &bank, - MAX_PROCESSING_AGE - .saturating_sub(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET as usize), - ); - - for (id, filter_result) in ids.iter().zip(&filter_array[..chunk_size]) { - if !*filter_result { - self.container.remove_by_id(&id.id); - continue; - } - - ids_to_add_back.push(*id); // add back to the queue at end - let state = self.container.get_mut_transaction_state(&id.id).unwrap(); - let sanitized_transaction = &state.transaction_ttl().transaction; - let immutable_packet = state.packet().clone(); - - // If not already forwarded and can be forwarded, add to forwardable packets. - if state.should_forward() - && forwarder.try_add_packet( - sanitized_transaction, - immutable_packet, - feature_set, - ) - { - saturating_add_assign!(num_forwarded, 1); - state.mark_forwarded(); - } - } - - if start.elapsed() >= MAX_FORWARDING_DURATION { - max_time_reached = true; - break; - } - } - - // Forward each batch of transactions - forwarder.forward_batched_packets(&ForwardOption::ForwardTransaction); - forwarder.clear_batches(); - - // If we hit the time limit. Drop everything that was not checked/processed. - // If we cannot run these simple checks in time, then we cannot run them during - // leader slot. - if max_time_reached { - while let Some(id) = self.container.pop() { - self.container.remove_by_id(&id.id); - } - } - - if hold { - for priority_id in ids_to_add_back { - self.container.push_id_into_queue(priority_id); - } - } else { - for priority_id in ids_to_add_back { - self.container.remove_by_id(&priority_id.id); - } - } - - self.count_metrics.update(|count_metrics| { - saturating_add_assign!(count_metrics.num_forwarded, num_forwarded); - }); - } - /// Clears the transaction state container. /// This only clears pending transactions, and does **not** clear in-flight transactions. 
fn clear_container(&mut self) { @@ -450,7 +330,7 @@ impl SchedulerController { }, true, ), - BufferedPacketsDecision::Forward => (MAX_PACKET_RECEIVE_TIME, self.forwarder.is_some()), + BufferedPacketsDecision::Forward => (MAX_PACKET_RECEIVE_TIME, false), BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => { (MAX_PACKET_RECEIVE_TIME, true) } @@ -518,7 +398,6 @@ impl SchedulerController { const CHUNK_SIZE: usize = 128; let lock_results: [_; CHUNK_SIZE] = core::array::from_fn(|_| Ok(())); - let mut arc_packets = ArrayVec::<_, CHUNK_SIZE>::new(); let mut transactions = ArrayVec::<_, CHUNK_SIZE>::new(); let mut max_ages = ArrayVec::<_, CHUNK_SIZE>::new(); let mut fee_budget_limits_vec = ArrayVec::<_, CHUNK_SIZE>::new(); @@ -552,8 +431,7 @@ impl SchedulerController { }) .ok() }) - .for_each(|(packet, tx, deactivation_slot, fee_budget_limits)| { - arc_packets.push(packet); + .for_each(|(_packet, tx, deactivation_slot, fee_budget_limits)| { transactions.push(tx); max_ages.push(calculate_max_age( last_slot_in_epoch, @@ -563,12 +441,7 @@ impl SchedulerController { fee_budget_limits_vec.push(fee_budget_limits); }); - let check_results: Vec< - Result< - solana_svm::account_loader::CheckedTransactionDetails, - solana_sdk::transaction::TransactionError, - >, - > = working_bank.check_transactions( + let check_results = working_bank.check_transactions( &transactions, &lock_results[..transactions.len()], MAX_PROCESSING_AGE, @@ -579,14 +452,12 @@ impl SchedulerController { let mut post_transaction_check_count: usize = 0; let mut num_dropped_on_capacity: usize = 0; let mut num_buffered: usize = 0; - for ((((packet, transaction), max_age), fee_budget_limits), _check_result) in - arc_packets - .drain(..) - .zip(transactions.drain(..)) - .zip(max_ages.drain(..)) - .zip(fee_budget_limits_vec.drain(..)) - .zip(check_results) - .filter(|(_, check_result)| check_result.is_ok()) + for (((transaction, max_age), fee_budget_limits), _check_result) in transactions + .drain(..) + .zip(max_ages.drain(..)) + .zip(fee_budget_limits_vec.drain(..)) + .zip(check_results) + .filter(|(_, check_result)| check_result.is_ok()) { saturating_add_assign!(post_transaction_check_count, 1); let transaction_id = self.transaction_id_generator.next(); @@ -604,7 +475,6 @@ impl SchedulerController { if self.container.insert_new_transaction( transaction_id, transaction_ttl, - packet, priority, cost, ) { @@ -726,7 +596,6 @@ mod tests { }, crossbeam_channel::{unbounded, Receiver, Sender}, itertools::Itertools, - solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ blockstore::Blockstore, genesis_utils::GenesisConfigInfo, get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, @@ -762,7 +631,7 @@ mod tests { finished_consume_work_sender: Sender, } - fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController>) { + fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController) { let GenesisConfigInfo { mut genesis_config, mint_keypair, @@ -812,7 +681,6 @@ mod tests { bank_forks, PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver), vec![], // no actual workers with metrics to report, this can be empty - None, ); (test_frame, scheduler_controller) @@ -856,9 +724,7 @@ mod tests { // in order to keep the decision as recent as possible for processing. // In the tests, the decision will not become stale, so it is more convenient // to receive first and then schedule. 
- fn test_receive_then_schedule( - scheduler_controller: &mut SchedulerController>, - ) { + fn test_receive_then_schedule(scheduler_controller: &mut SchedulerController) { let decision = scheduler_controller .decision_maker .make_consume_or_forward_decision(); diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index bb8cbbe617396a..1f1650248740fe 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -59,8 +59,6 @@ pub struct SchedulerCountMetricsInner { pub num_finished: usize, /// Number of transactions that were retryable. pub num_retryable: usize, - /// Number of transactions that were scheduled to be forwarded. - pub num_forwarded: usize, /// Number of transactions that were immediately dropped on receive. pub num_dropped_on_receive: usize, @@ -124,7 +122,6 @@ impl SchedulerCountMetricsInner { ), ("num_finished", self.num_finished, i64), ("num_retryable", self.num_retryable, i64), - ("num_forwarded", self.num_forwarded, i64), ("num_dropped_on_receive", self.num_dropped_on_receive, i64), ( "num_dropped_on_sanitization", @@ -165,7 +162,6 @@ impl SchedulerCountMetricsInner { || self.num_schedule_filtered_out != 0 || self.num_finished != 0 || self.num_retryable != 0 - || self.num_forwarded != 0 || self.num_dropped_on_receive != 0 || self.num_dropped_on_sanitization != 0 || self.num_dropped_on_validate_locks != 0 @@ -183,7 +179,6 @@ impl SchedulerCountMetricsInner { self.num_schedule_filtered_out = 0; self.num_finished = 0; self.num_retryable = 0; - self.num_forwarded = 0; self.num_dropped_on_receive = 0; self.num_dropped_on_sanitization = 0; self.num_dropped_on_validate_locks = 0; @@ -275,8 +270,6 @@ pub struct SchedulerTimingMetricsInner { pub clear_time_us: u64, /// Time spent cleaning expired or processed transactions from the container. pub clean_time_us: u64, - /// Time spent forwarding transactions. - pub forward_time_us: u64, /// Time spent receiving completed transactions. pub receive_completed_time_us: u64, } @@ -318,7 +311,6 @@ impl SchedulerTimingMetricsInner { ("schedule_time_us", self.schedule_time_us, i64), ("clear_time_us", self.clear_time_us, i64), ("clean_time_us", self.clean_time_us, i64), - ("forward_time_us", self.forward_time_us, i64), ( "receive_completed_time_us", self.receive_completed_time_us, @@ -339,7 +331,6 @@ impl SchedulerTimingMetricsInner { self.schedule_time_us = 0; self.clear_time_us = 0; self.clean_time_us = 0; - self.forward_time_us = 0; self.receive_completed_time_us = 0; } } diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs b/core/src/banking_stage/transaction_scheduler/transaction_state.rs index efb59be1b8b5b5..6d7dfba85b7eb9 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -1,9 +1,5 @@ use { - crate::banking_stage::{ - immutable_deserialized_packet::ImmutableDeserializedPacket, scheduler_messages::MaxAge, - }, - solana_sdk::transaction::SanitizedTransaction, - std::sync::Arc, + crate::banking_stage::scheduler_messages::MaxAge, solana_sdk::transaction::SanitizedTransaction, }; /// Simple wrapper type to tie a sanitized transaction to max age slot. @@ -36,38 +32,22 @@ pub(crate) enum TransactionState { /// The transaction is available for scheduling. 
Unprocessed { transaction_ttl: SanitizedTransactionTTL, - packet: Arc, priority: u64, cost: u64, - should_forward: bool, }, /// The transaction is currently scheduled or being processed. - Pending { - packet: Arc, - priority: u64, - cost: u64, - should_forward: bool, - }, + Pending { priority: u64, cost: u64 }, /// Only used during transition. Transitioning, } impl TransactionState { /// Creates a new `TransactionState` in the `Unprocessed` state. - pub(crate) fn new( - transaction_ttl: SanitizedTransactionTTL, - packet: Arc, - priority: u64, - cost: u64, - ) -> Self { - let should_forward = !packet.original_packet().meta().forwarded() - && packet.original_packet().meta().is_from_staked_node(); + pub(crate) fn new(transaction_ttl: SanitizedTransactionTTL, priority: u64, cost: u64) -> Self { Self::Unprocessed { transaction_ttl, - packet, priority, cost, - should_forward, } } @@ -91,40 +71,6 @@ impl TransactionState { } } - /// Return whether packet should be attempted to be forwarded. - pub(crate) fn should_forward(&self) -> bool { - match self { - Self::Unprocessed { - should_forward: forwarded, - .. - } => *forwarded, - Self::Pending { - should_forward: forwarded, - .. - } => *forwarded, - Self::Transitioning => unreachable!(), - } - } - - /// Mark the packet as forwarded. - /// This is used to prevent the packet from being forwarded multiple times. - pub(crate) fn mark_forwarded(&mut self) { - match self { - Self::Unprocessed { should_forward, .. } => *should_forward = false, - Self::Pending { should_forward, .. } => *should_forward = false, - Self::Transitioning => unreachable!(), - } - } - - /// Return the packet of the transaction. - pub(crate) fn packet(&self) -> &Arc { - match self { - Self::Unprocessed { packet, .. } => packet, - Self::Pending { packet, .. } => packet, - Self::Transitioning => unreachable!(), - } - } - /// Intended to be called when a transaction is scheduled. This method will /// transition the transaction from `Unprocessed` to `Pending` and return the /// `SanitizedTransactionTTL` for processing. @@ -136,17 +82,10 @@ impl TransactionState { match self.take() { TransactionState::Unprocessed { transaction_ttl, - packet, priority, cost, - should_forward: forwarded, } => { - *self = TransactionState::Pending { - packet, - priority, - cost, - should_forward: forwarded, - }; + *self = TransactionState::Pending { priority, cost }; transaction_ttl } TransactionState::Pending { .. } => { @@ -165,18 +104,11 @@ impl TransactionState { pub(crate) fn transition_to_unprocessed(&mut self, transaction_ttl: SanitizedTransactionTTL) { match self.take() { TransactionState::Unprocessed { .. 
} => panic!("already unprocessed"), - TransactionState::Pending { - packet, - priority, - cost, - should_forward: forwarded, - } => { + TransactionState::Pending { priority, cost } => { *self = Self::Unprocessed { transaction_ttl, - packet, priority, cost, - should_forward: forwarded, } } Self::Transitioning => unreachable!(), @@ -210,8 +142,7 @@ mod tests { super::*, solana_sdk::{ clock::Slot, compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, - packet::Packet, signature::Keypair, signer::Signer, system_instruction, - transaction::Transaction, + signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, }, }; @@ -228,9 +159,6 @@ mod tests { let message = Message::new(&ixs, Some(&from_keypair.pubkey())); let tx = Transaction::new(&[&from_keypair], message, Hash::default()); - let packet = Arc::new( - ImmutableDeserializedPacket::new(Packet::from_data(None, tx.clone()).unwrap()).unwrap(), - ); let transaction_ttl = SanitizedTransactionTTL { transaction: SanitizedTransaction::from_transaction_for_tests(tx), max_age: MaxAge { @@ -239,12 +167,7 @@ mod tests { }, }; const TEST_TRANSACTION_COST: u64 = 5000; - TransactionState::new( - transaction_ttl, - packet, - compute_unit_price, - TEST_TRANSACTION_COST, - ) + TransactionState::new(transaction_ttl, compute_unit_price, TEST_TRANSACTION_COST) } #[test] diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index 7d40c66ec1b673..e8ae2bc99d3d56 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -3,13 +3,10 @@ use { transaction_priority_id::TransactionPriorityId, transaction_state::{SanitizedTransactionTTL, TransactionState}, }, - crate::banking_stage::{ - immutable_deserialized_packet::ImmutableDeserializedPacket, - scheduler_messages::TransactionId, - }, + crate::banking_stage::scheduler_messages::TransactionId, itertools::MinMaxResult, min_max_heap::MinMaxHeap, - std::{collections::HashMap, sync::Arc}, + std::collections::HashMap, }; /// This structure will hold `TransactionState` for the entirety of a @@ -90,14 +87,13 @@ impl TransactionStateContainer { &mut self, transaction_id: TransactionId, transaction_ttl: SanitizedTransactionTTL, - packet: Arc, priority: u64, cost: u64, ) -> bool { let priority_id = TransactionPriorityId::new(priority, transaction_id); self.id_to_transaction_state.insert( transaction_id, - TransactionState::new(transaction_ttl, packet, priority, cost), + TransactionState::new(transaction_ttl, priority, cost), ); self.push_id_into_queue(priority_id) } @@ -158,7 +154,6 @@ mod tests { compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, - packet::Packet, signature::Keypair, signer::Signer, slot_history::Slot, @@ -168,14 +163,7 @@ mod tests { }; /// Returns (transaction_ttl, priority, cost) - fn test_transaction( - priority: u64, - ) -> ( - SanitizedTransactionTTL, - Arc, - u64, - u64, - ) { + fn test_transaction(priority: u64) -> (SanitizedTransactionTTL, u64, u64) { let from_keypair = Keypair::new(); let ixs = vec![ system_instruction::transfer( @@ -191,12 +179,6 @@ mod tests { message, Hash::default(), )); - let packet = Arc::new( - ImmutableDeserializedPacket::new( - Packet::from_data(None, tx.to_versioned_transaction()).unwrap(), - ) - .unwrap(), - ); let transaction_ttl = SanitizedTransactionTTL { transaction: tx, max_age: 
MaxAge { @@ -205,17 +187,16 @@ mod tests { }, }; const TEST_TRANSACTION_COST: u64 = 5000; - (transaction_ttl, packet, priority, TEST_TRANSACTION_COST) + (transaction_ttl, priority, TEST_TRANSACTION_COST) } fn push_to_container(container: &mut TransactionStateContainer, num: usize) { for id in 0..num as u64 { let priority = id; - let (transaction_ttl, packet, priority, cost) = test_transaction(priority); + let (transaction_ttl, priority, cost) = test_transaction(priority); container.insert_new_transaction( TransactionId::new(id), transaction_ttl, - packet, priority, cost, ); diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 56e814acea9219..98babcb8b37825 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -1,7 +1,6 @@ use { super::{ consumer::Consumer, - forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, immutable_deserialized_packet::ImmutableDeserializedPacket, latest_unprocessed_votes::{ LatestUnprocessedVotes, LatestValidatorVotePacket, VoteBatchInsertionMetrics, @@ -13,18 +12,14 @@ use { unprocessed_packet_batches::{ DeserializedPacket, PacketBatchInsertionMetrics, UnprocessedPacketBatches, }, - BankingStageStats, FilterForwardingResults, ForwardOption, + BankingStageStats, }, itertools::Itertools, min_max_heap::MinMaxHeap, solana_accounts_db::account_locks::validate_account_locks, - solana_feature_set::FeatureSet, solana_measure::measure_us, solana_runtime::bank::Bank, - solana_sdk::{ - clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, hash::Hash, saturating_add_assign, - transaction::SanitizedTransaction, - }, + solana_sdk::{hash::Hash, transaction::SanitizedTransaction}, solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ collections::HashMap, @@ -334,22 +329,6 @@ impl UnprocessedTransactionStorage { } } - pub fn forward_option(&self) -> ForwardOption { - match self { - Self::VoteStorage(vote_storage) => vote_storage.forward_option(), - Self::LocalTransactionStorage(transaction_storage) => { - transaction_storage.forward_option() - } - } - } - - pub fn clear_forwarded_packets(&mut self) { - match self { - Self::LocalTransactionStorage(transaction_storage) => transaction_storage.clear(), // Since we set everything as forwarded this is the same - Self::VoteStorage(vote_storage) => vote_storage.clear_forwarded_packets(), - } - } - pub(crate) fn insert_batch( &mut self, deserialized_packets: Vec, @@ -364,25 +343,6 @@ impl UnprocessedTransactionStorage { } } - pub fn filter_forwardable_packets_and_add_batches( - &mut self, - bank: Arc, - forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, - ) -> FilterForwardingResults { - match self { - Self::LocalTransactionStorage(transaction_storage) => transaction_storage - .filter_forwardable_packets_and_add_batches( - bank, - forward_packet_batches_by_accounts, - ), - Self::VoteStorage(vote_storage) => vote_storage - .filter_forwardable_packets_and_add_batches( - bank, - forward_packet_batches_by_accounts, - ), - } - } - /// The processing function takes a stream of packets ready to process, and returns the indices /// of the unprocessed packets that are eligible for retry. A return value of None means that /// all packets are unprocessed and eligible for retry. 
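The doc comment above pins down the retry contract used by this storage layer: the processing callback reports the indices of packets that remain eligible for retry, and a `None` return means every packet is still unprocessed and retryable. A minimal sketch of consuming such a contract, using a hypothetical `retain_retryable` helper (illustration only, not an API defined in this file):

    /// Keep only the entries the processor reported as retryable.
    /// Per the contract above, `None` means "retry everything", so the
    /// buffer is left untouched in that case.
    fn retain_retryable<T: Clone>(buffer: &mut Vec<T>, retryable: Option<Vec<usize>>) {
        if let Some(indices) = retryable {
            let retained: Vec<T> = indices
                .into_iter()
                .filter_map(|i| buffer.get(i).cloned())
                .collect();
            *buffer = retained;
        }
    }

    #[test]
    fn retains_only_reported_indices() {
        let mut packets = vec!["a", "b", "c", "d"];
        retain_retryable(&mut packets, Some(vec![1, 3]));
        assert_eq!(packets, vec!["b", "d"]);

        let mut all_retryable = vec!["a", "b"];
        retain_retryable(&mut all_retryable, None);
        assert_eq!(all_retryable, vec!["a", "b"]);
    }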
@@ -417,6 +377,15 @@ impl UnprocessedTransactionStorage { } } + pub fn clear(&mut self) { + match self { + Self::LocalTransactionStorage(transaction_storage) => { + transaction_storage.unprocessed_packet_batches.clear() + } + Self::VoteStorage(_vote_storage) => {} + } + } + pub(crate) fn cache_epoch_boundary_info(&mut self, bank: &Bank) { match self { Self::LocalTransactionStorage(_) => (), @@ -438,17 +407,6 @@ impl VoteStorage { MAX_NUM_VOTES_RECEIVE } - fn forward_option(&self) -> ForwardOption { - match self.vote_source { - VoteSource::Tpu => ForwardOption::ForwardTpuVote, - VoteSource::Gossip => ForwardOption::NotForward, - } - } - - fn clear_forwarded_packets(&mut self) { - self.latest_unprocessed_votes.clear_forwarded_packets(); - } - fn insert_batch( &mut self, deserialized_packets: Vec, @@ -469,23 +427,6 @@ impl VoteStorage { ) } - fn filter_forwardable_packets_and_add_batches( - &mut self, - bank: Arc, - forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, - ) -> FilterForwardingResults { - if matches!(self.vote_source, VoteSource::Tpu) { - let total_forwardable_packets = self - .latest_unprocessed_votes - .get_and_insert_forwardable_packets(bank, forward_packet_batches_by_accounts); - return FilterForwardingResults { - total_forwardable_packets, - ..FilterForwardingResults::default() - }; - } - FilterForwardingResults::default() - } - // returns `true` if the end of slot is reached fn process_packets( &mut self, @@ -605,18 +546,6 @@ impl ThreadLocalUnprocessedPackets { self.unprocessed_packet_batches.iter_mut() } - fn forward_option(&self) -> ForwardOption { - match self.thread_type { - ThreadType::Transactions => ForwardOption::ForwardTransaction, - ThreadType::Voting(VoteSource::Tpu) => ForwardOption::ForwardTpuVote, - ThreadType::Voting(VoteSource::Gossip) => ForwardOption::NotForward, - } - } - - fn clear(&mut self) { - self.unprocessed_packet_batches.clear(); - } - fn insert_batch( &mut self, deserialized_packets: Vec, @@ -628,135 +557,6 @@ impl ThreadLocalUnprocessedPackets { ) } - /// Filter out packets that fail to sanitize, or are no longer valid (could be - /// too old, a duplicate of something already processed). Doing this in batches to avoid - /// checking bank's blockhash and status cache per transaction which could be bad for performance. - /// Added valid and sanitized packets to forwarding queue. - fn filter_forwardable_packets_and_add_batches( - &mut self, - bank: Arc, - forward_buffer: &mut ForwardPacketBatchesByAccounts, - ) -> FilterForwardingResults { - let mut total_forwardable_tracer_packets: usize = 0; - let mut total_tracer_packets_in_buffer: usize = 0; - let mut total_forwardable_packets: usize = 0; - let mut total_packet_conversion_us: u64 = 0; - let mut total_filter_packets_us: u64 = 0; - let mut total_dropped_packets: usize = 0; - - let mut original_priority_queue = self.take_priority_queue(); - let original_capacity = original_priority_queue.capacity(); - let mut new_priority_queue = MinMaxHeap::with_capacity(original_capacity); - - // indicates if `forward_buffer` still accept more packets, see details at - // `ForwardPacketBatchesByAccounts.rs`. 
- let mut accepting_packets = true; - // batch iterate through self.unprocessed_packet_batches in desc priority order - new_priority_queue.extend( - original_priority_queue - .drain_desc() - .chunks(UNPROCESSED_BUFFER_STEP_SIZE) - .into_iter() - .flat_map(|packets_to_process| { - // Only process packets not yet forwarded - let (forwarded_packets, packets_to_forward, is_tracer_packet) = self - .prepare_packets_to_forward( - packets_to_process, - &mut total_tracer_packets_in_buffer, - ); - - [ - forwarded_packets, - if accepting_packets { - let ( - (sanitized_transactions, transaction_to_packet_indexes), - packet_conversion_us, - ) = measure_us!(self.sanitize_unforwarded_packets( - &packets_to_forward, - &bank, - &mut total_dropped_packets - )); - saturating_add_assign!( - total_packet_conversion_us, - packet_conversion_us - ); - - let (forwardable_transaction_indexes, filter_packets_us) = - measure_us!(Self::filter_invalid_transactions( - &sanitized_transactions, - &bank, - &mut total_dropped_packets - )); - saturating_add_assign!(total_filter_packets_us, filter_packets_us); - - for forwardable_transaction_index in &forwardable_transaction_indexes { - saturating_add_assign!(total_forwardable_packets, 1); - let forwardable_packet_index = - transaction_to_packet_indexes[*forwardable_transaction_index]; - if is_tracer_packet[forwardable_packet_index] { - saturating_add_assign!(total_forwardable_tracer_packets, 1); - } - } - - let accepted_packet_indexes = - Self::add_filtered_packets_to_forward_buffer( - forward_buffer, - &packets_to_forward, - &sanitized_transactions, - &transaction_to_packet_indexes, - &forwardable_transaction_indexes, - &mut total_dropped_packets, - &bank.feature_set, - ); - accepting_packets = accepted_packet_indexes.len() - == forwardable_transaction_indexes.len(); - - self.unprocessed_packet_batches - .mark_accepted_packets_as_forwarded( - &packets_to_forward, - &accepted_packet_indexes, - ); - - Self::collect_retained_packets( - &mut self.unprocessed_packet_batches.message_hash_to_transaction, - &packets_to_forward, - &Self::prepare_filtered_packet_indexes( - &transaction_to_packet_indexes, - &forwardable_transaction_indexes, - ), - ) - } else { - // skip sanitizing and filtering if not longer able to add more packets for forwarding - saturating_add_assign!(total_dropped_packets, packets_to_forward.len()); - packets_to_forward - }, - ] - .concat() - }), - ); - - // replace packet priority queue - self.unprocessed_packet_batches.packet_priority_queue = new_priority_queue; - self.verify_priority_queue(original_capacity); - - // Assert unprocessed queue is still consistent - assert_eq!( - self.unprocessed_packet_batches.packet_priority_queue.len(), - self.unprocessed_packet_batches - .message_hash_to_transaction - .len() - ); - - FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - total_dropped_packets, - total_packet_conversion_us, - total_filter_packets_us, - } - } - /// Take self.unprocessed_packet_batches's priority_queue out, leave empty MinMaxHeap in its place. fn take_priority_queue(&mut self) -> MinMaxHeap> { std::mem::replace( @@ -782,106 +582,6 @@ impl ThreadLocalUnprocessedPackets { ); } - /// sanitize un-forwarded packet into SanitizedTransaction for validation and forwarding. 
- fn sanitize_unforwarded_packets( - &mut self, - packets_to_process: &[Arc], - bank: &Bank, - total_dropped_packets: &mut usize, - ) -> (Vec, Vec) { - // Get ref of ImmutableDeserializedPacket - let deserialized_packets = packets_to_process.iter().map(|p| &**p); - let (transactions, transaction_to_packet_indexes): (Vec, Vec) = - deserialized_packets - .enumerate() - .filter_map(|(packet_index, deserialized_packet)| { - deserialized_packet - .build_sanitized_transaction( - bank.vote_only_bank(), - bank, - bank.get_reserved_account_keys(), - ) - .map(|(transaction, _deactivation_slot)| (transaction, packet_index)) - }) - .unzip(); - - let filtered_count = packets_to_process.len().saturating_sub(transactions.len()); - saturating_add_assign!(*total_dropped_packets, filtered_count); - - (transactions, transaction_to_packet_indexes) - } - - /// Checks sanitized transactions against bank, returns valid transaction indexes - fn filter_invalid_transactions( - transactions: &[SanitizedTransaction], - bank: &Bank, - total_dropped_packets: &mut usize, - ) -> Vec { - let filter = vec![Ok(()); transactions.len()]; - let results = bank.check_transactions_with_forwarding_delay( - transactions, - &filter, - FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, - ); - - let filtered_count = transactions.len().saturating_sub(results.len()); - saturating_add_assign!(*total_dropped_packets, filtered_count); - - results - .iter() - .enumerate() - .filter_map(|(tx_index, result)| result.as_ref().ok().map(|_| tx_index)) - .collect_vec() - } - - fn prepare_filtered_packet_indexes( - transaction_to_packet_indexes: &[usize], - retained_transaction_indexes: &[usize], - ) -> Vec { - retained_transaction_indexes - .iter() - .map(|tx_index| transaction_to_packet_indexes[*tx_index]) - .collect_vec() - } - - /// try to add filtered forwardable and valid packets to forward buffer; - /// returns vector of packet indexes that were accepted for forwarding. - fn add_filtered_packets_to_forward_buffer( - forward_buffer: &mut ForwardPacketBatchesByAccounts, - packets_to_process: &[Arc], - transactions: &[SanitizedTransaction], - transaction_to_packet_indexes: &[usize], - forwardable_transaction_indexes: &[usize], - total_dropped_packets: &mut usize, - feature_set: &FeatureSet, - ) -> Vec { - let mut added_packets_count: usize = 0; - let mut accepted_packet_indexes = Vec::with_capacity(transaction_to_packet_indexes.len()); - for forwardable_transaction_index in forwardable_transaction_indexes { - let sanitized_transaction = &transactions[*forwardable_transaction_index]; - let forwardable_packet_index = - transaction_to_packet_indexes[*forwardable_transaction_index]; - let immutable_deserialized_packet = - packets_to_process[forwardable_packet_index].clone(); - if !forward_buffer.try_add_packet( - sanitized_transaction, - immutable_deserialized_packet, - feature_set, - ) { - break; - } - accepted_packet_indexes.push(forwardable_packet_index); - saturating_add_assign!(added_packets_count, 1); - } - - let filtered_count = forwardable_transaction_indexes - .len() - .saturating_sub(added_packets_count); - saturating_add_assign!(*total_dropped_packets, filtered_count); - - accepted_packet_indexes - } - fn collect_retained_packets( message_hash_to_transaction: &mut HashMap, packets_to_process: &[Arc], @@ -974,45 +674,6 @@ impl ThreadLocalUnprocessedPackets { reached_end_of_slot } - - /// Prepare a chunk of packets for forwarding, filter out already forwarded packets while - /// counting tracers. 
- /// Returns Vec of unforwarded packets, and Vec of same size each indicates corresponding - /// packet is tracer packet. - fn prepare_packets_to_forward( - &self, - packets_to_forward: impl Iterator>, - total_tracer_packets_in_buffer: &mut usize, - ) -> ( - Vec>, - Vec>, - Vec, - ) { - let mut forwarded_packets: Vec> = vec![]; - let (forwardable_packets, is_tracer_packet) = packets_to_forward - .into_iter() - .filter_map(|immutable_deserialized_packet| { - let is_tracer_packet = immutable_deserialized_packet - .original_packet() - .meta() - .is_tracer_packet(); - if is_tracer_packet { - saturating_add_assign!(*total_tracer_packets_in_buffer, 1); - } - if !self - .unprocessed_packet_batches - .is_forwarded(&immutable_deserialized_packet) - { - Some((immutable_deserialized_packet, is_tracer_packet)) - } else { - forwarded_packets.push(immutable_deserialized_packet); - None - } - }) - .unzip(); - - (forwarded_packets, forwardable_packets, is_tracer_packet) - } } #[cfg(test)] @@ -1020,14 +681,12 @@ mod tests { use { super::*, itertools::iproduct, - solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, solana_perf::packet::{Packet, PacketFlags}, solana_runtime::genesis_utils, solana_sdk::{ hash::Hash, signature::{Keypair, Signer}, system_transaction, - transaction::Transaction, }, solana_vote_program::{ vote_state::TowerSync, vote_transaction::new_tower_sync_transaction, @@ -1086,145 +745,6 @@ mod tests { assert_eq!(non_retryable_indexes, vec![(0, 1), (4, 5), (6, 8)]); } - #[test] - fn test_filter_and_forward_with_account_limits() { - solana_logger::setup(); - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. - } = create_genesis_config(10); - let (current_bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - - let simple_transactions: Vec = (0..256) - .map(|_id| { - // packets are deserialized upon receiving, failed packets will not be - // forwarded; Therefore we need to create real packets here. - let key1 = Keypair::new(); - system_transaction::transfer( - &mint_keypair, - &key1.pubkey(), - genesis_config.rent.minimum_balance(0), - genesis_config.hash(), - ) - }) - .collect_vec(); - - let mut packets: Vec = simple_transactions - .iter() - .enumerate() - .map(|(packets_id, transaction)| { - let mut p = Packet::from_data(None, transaction).unwrap(); - p.meta_mut().port = packets_id as u16; - p.meta_mut().set_tracer(true); - DeserializedPacket::new(p).unwrap() - }) - .collect_vec(); - - // all packets are forwarded - { - let buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone(), packets.len()); - let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( - buffered_packet_batches, - ThreadType::Transactions, - ); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - - let FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - .. 
- } = transaction_storage.filter_forwardable_packets_and_add_batches( - current_bank.clone(), - &mut forward_packet_batches_by_accounts, - ); - assert_eq!(total_forwardable_packets, 256); - assert_eq!(total_tracer_packets_in_buffer, 256); - assert_eq!(total_forwardable_tracer_packets, 256); - - // packets in a batch are forwarded in arbitrary order; verify the ports match after - // sorting - let expected_ports: Vec<_> = (0..256).collect(); - let mut forwarded_ports: Vec<_> = forward_packet_batches_by_accounts - .iter_batches() - .flat_map(|batch| batch.get_forwardable_packets().map(|p| p.meta().port)) - .collect(); - forwarded_ports.sort_unstable(); - assert_eq!(expected_ports, forwarded_ports); - } - - // some packets are forwarded - { - let num_already_forwarded = 16; - for packet in &mut packets[0..num_already_forwarded] { - packet.forwarded = true; - } - let buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone(), packets.len()); - let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( - buffered_packet_batches, - ThreadType::Transactions, - ); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - let FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - .. - } = transaction_storage.filter_forwardable_packets_and_add_batches( - current_bank.clone(), - &mut forward_packet_batches_by_accounts, - ); - assert_eq!( - total_forwardable_packets, - packets.len() - num_already_forwarded - ); - assert_eq!(total_tracer_packets_in_buffer, packets.len()); - assert_eq!( - total_forwardable_tracer_packets, - packets.len() - num_already_forwarded - ); - } - - // some packets are invalid (already processed) - { - let num_already_processed = 16; - for tx in &simple_transactions[0..num_already_processed] { - assert_eq!(current_bank.process_transaction(tx), Ok(())); - } - let buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone(), packets.len()); - let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( - buffered_packet_batches, - ThreadType::Transactions, - ); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - let FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - .. - } = transaction_storage.filter_forwardable_packets_and_add_batches( - current_bank, - &mut forward_packet_batches_by_accounts, - ); - assert_eq!( - total_forwardable_packets, - packets.len() - num_already_processed - ); - assert_eq!(total_tracer_packets_in_buffer, packets.len()); - assert_eq!( - total_forwardable_tracer_packets, - packets.len() - num_already_processed - ); - } - } - #[test] fn test_unprocessed_transaction_storage_insert() -> Result<(), Box> { let keypair = Keypair::new(); @@ -1353,123 +873,4 @@ mod tests { assert_eq!(1, transaction_storage.len()); Ok(()) } - - #[test] - fn test_prepare_packets_to_forward() { - solana_logger::setup(); - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. - } = create_genesis_config(10); - - let simple_transactions: Vec = (0..256) - .map(|_id| { - // packets are deserialized upon receiving, failed packets will not be - // forwarded; Therefore we need to create real packets here. 
- let key1 = Keypair::new(); - system_transaction::transfer( - &mint_keypair, - &key1.pubkey(), - genesis_config.rent.minimum_balance(0), - genesis_config.hash(), - ) - }) - .collect_vec(); - - let mut packets: Vec = simple_transactions - .iter() - .enumerate() - .map(|(packets_id, transaction)| { - let mut p = Packet::from_data(None, transaction).unwrap(); - p.meta_mut().port = packets_id as u16; - p.meta_mut().set_tracer(true); - DeserializedPacket::new(p).unwrap() - }) - .collect_vec(); - - // test preparing buffered packets for forwarding - let test_prepareing_buffered_packets_for_forwarding = - |buffered_packet_batches: UnprocessedPacketBatches| -> (usize, usize, usize) { - let mut total_tracer_packets_in_buffer: usize = 0; - let mut total_packets_to_forward: usize = 0; - let mut total_tracer_packets_to_forward: usize = 0; - - let mut unprocessed_transactions = ThreadLocalUnprocessedPackets { - unprocessed_packet_batches: buffered_packet_batches, - thread_type: ThreadType::Transactions, - }; - - let mut original_priority_queue = unprocessed_transactions.take_priority_queue(); - let _ = original_priority_queue - .drain_desc() - .chunks(128usize) - .into_iter() - .flat_map(|packets_to_process| { - let (_, packets_to_forward, is_tracer_packet) = unprocessed_transactions - .prepare_packets_to_forward( - packets_to_process, - &mut total_tracer_packets_in_buffer, - ); - total_packets_to_forward += packets_to_forward.len(); - total_tracer_packets_to_forward += is_tracer_packet.len(); - packets_to_forward - }) - .collect::>>(); - ( - total_tracer_packets_in_buffer, - total_packets_to_forward, - total_tracer_packets_to_forward, - ) - }; - - // all tracer packets are forwardable - { - let buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone(), packets.len()); - let ( - total_tracer_packets_in_buffer, - total_packets_to_forward, - total_tracer_packets_to_forward, - ) = test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches); - assert_eq!(total_tracer_packets_in_buffer, 256); - assert_eq!(total_packets_to_forward, 256); - assert_eq!(total_tracer_packets_to_forward, 256); - } - - // some packets are forwarded - { - let num_already_forwarded = 16; - for packet in &mut packets[0..num_already_forwarded] { - packet.forwarded = true; - } - let buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone(), packets.len()); - let ( - total_tracer_packets_in_buffer, - total_packets_to_forward, - total_tracer_packets_to_forward, - ) = test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches); - assert_eq!(total_tracer_packets_in_buffer, 256); - assert_eq!(total_packets_to_forward, 256 - num_already_forwarded); - assert_eq!(total_tracer_packets_to_forward, 256 - num_already_forwarded); - } - - // all packets are forwarded - { - for packet in &mut packets { - packet.forwarded = true; - } - let buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone(), packets.len()); - let ( - total_tracer_packets_in_buffer, - total_packets_to_forward, - total_tracer_packets_to_forward, - ) = test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches); - assert_eq!(total_tracer_packets_in_buffer, 256); - assert_eq!(total_packets_to_forward, 0); - assert_eq!(total_tracer_packets_to_forward, 0); - } - } } diff --git a/core/src/forwarding_stage.rs b/core/src/forwarding_stage.rs new file mode 100644 index 00000000000000..38df863add78af --- 
+++ b/core/src/forwarding_stage.rs
@@ -0,0 +1,396 @@
+use {
+    crate::{
+        banking_stage::LikeClusterInfo,
+        banking_trace::BankingPacketBatch,
+        next_leader::{next_leader, next_leader_tpu_vote},
+    },
+    crossbeam_channel::Receiver,
+    solana_client::connection_cache::ConnectionCache,
+    solana_connection_cache::client_connection::ClientConnection,
+    solana_gossip::contact_info::Protocol,
+    solana_perf::data_budget::DataBudget,
+    solana_poh::poh_recorder::PohRecorder,
+    solana_sdk::timing::AtomicInterval,
+    solana_streamer::sendmmsg::batch_send,
+    std::{
+        iter::repeat,
+        net::{SocketAddr, UdpSocket},
+        sync::{Arc, RwLock},
+        thread::{Builder, JoinHandle},
+    },
+};
+
+pub trait ForwardAddressGetter: Send + Sync + 'static {
+    fn get_forwarding_address(&self, tpu_vote: bool, protocol: Protocol) -> Option<SocketAddr>;
+}
+
+impl<T: LikeClusterInfo> ForwardAddressGetter for (T, Arc<RwLock<PohRecorder>>) {
+    fn get_forwarding_address(&self, tpu_vote: bool, protocol: Protocol) -> Option<SocketAddr> {
+        if tpu_vote {
+            next_leader_tpu_vote(&self.0, &self.1)
+        } else {
+            next_leader(&self.0, &self.1, |node| node.tpu_forwards(protocol))
+        }
+        .map(|(_, addr)| addr)
+    }
+}
+
+pub struct ForwardingStage<T: ForwardAddressGetter> {
+    receiver: Receiver<(BankingPacketBatch, bool)>,
+    forward_address_getter: T,
+    connection_cache: Arc<ConnectionCache>,
+    data_budget: DataBudget,
+    udp_socket: UdpSocket,
+    metrics: ForwardingStageMetrics,
+}
+
+impl<T: ForwardAddressGetter> ForwardingStage<T> {
+    pub fn spawn(
+        receiver: Receiver<(BankingPacketBatch, bool)>,
+        forward_address_getter: T,
+        connection_cache: Arc<ConnectionCache>,
+    ) -> JoinHandle<()> {
+        let forwarding_stage = Self::new(receiver, forward_address_getter, connection_cache);
+        Builder::new()
+            .name("solFwdStage".to_string())
+            .spawn(move || forwarding_stage.run())
+            .unwrap()
+    }
+
+    fn new(
+        receiver: Receiver<(BankingPacketBatch, bool)>,
+        forward_address_getter: T,
+        connection_cache: Arc<ConnectionCache>,
+    ) -> Self {
+        Self {
+            receiver,
+            forward_address_getter,
+            connection_cache,
+            data_budget: DataBudget::default(),
+            udp_socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
+            metrics: ForwardingStageMetrics::default(),
+        }
+    }
+
+    fn run(mut self) {
+        while let Ok((packet_batches, tpu_vote_batch)) = self.receiver.recv() {
+            self.metrics.maybe_report();
+            self.handle_batches(packet_batches, tpu_vote_batch);
+        }
+    }
+
+    fn handle_batches(&mut self, packet_batches: BankingPacketBatch, tpu_vote_batch: bool) {
+        // Count the number of packets received
+        let num_packets_received: usize = packet_batches.0.iter().map(|batch| batch.len()).sum();
+        if tpu_vote_batch {
+            self.metrics.vote_packets_received += num_packets_received;
+        } else {
+            self.metrics.non_vote_packets_received += num_packets_received;
+        }
+
+        let Some(addr) = self
+            .forward_address_getter
+            .get_forwarding_address(tpu_vote_batch, self.connection_cache.protocol())
+        else {
+            return;
+        };
+
+        self.update_data_budget();
+        self.forward_batches(packet_batches, tpu_vote_batch, addr);
+    }
+
+    fn forward_batches(
+        &mut self,
+        packet_batches: BankingPacketBatch,
+        tpu_vote_batch: bool,
+        addr: SocketAddr,
+    ) {
+        let filtered_packets = packet_batches
+            .0
+            .iter()
+            .flat_map(|batch| batch.iter())
+            .filter(|p| !p.meta().discard())
+            .filter(|p| !p.meta().forwarded())
+            .filter(|p| p.meta().is_from_staked_node())
+            .filter(|p| self.data_budget.take(p.meta().size))
+            .filter_map(|p| p.data(..).map(|data| data.to_vec()));
+
+        let (success, num_packets) = if tpu_vote_batch {
+            let packets: Vec<_> = filtered_packets.into_iter().zip(repeat(addr)).collect();
+            let num_packets = packets.len();
+            let res = batch_send(&self.udp_socket, &packets);
+            (res.is_ok(), num_packets)
+        } else {
+            let packets: Vec<_> = filtered_packets.collect();
+            let num_packets = packets.len();
+            let conn = self.connection_cache.get_connection(&addr);
+            let res = conn.send_data_batch_async(packets);
+            (res.is_ok(), num_packets)
+        };
+
+        if tpu_vote_batch {
+            self.metrics.vote_packets_attempted_forward += num_packets;
+            if success {
+                self.metrics.vote_packets_forwarded += num_packets;
+            }
+        } else {
+            self.metrics.non_vote_packets_attempted_forward += num_packets;
+            if success {
+                self.metrics.non_vote_packets_forwarded += num_packets;
+            }
+        }
+    }
+
+    /// Re-fill the data budget if enough time has passed
+    fn update_data_budget(&self) {
+        const INTERVAL_MS: u64 = 100;
+        // 12 MB outbound limit per second
+        const MAX_BYTES_PER_SECOND: usize = 12_000_000;
+        const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000;
+        const MAX_BYTES_BUDGET: usize = MAX_BYTES_PER_INTERVAL * 5;
+        self.data_budget.update(INTERVAL_MS, |bytes| {
+            std::cmp::min(
+                bytes.saturating_add(MAX_BYTES_PER_INTERVAL),
+                MAX_BYTES_BUDGET,
+            )
+        });
+    }
+}
+
+#[derive(Default)]
+struct ForwardingStageMetrics {
+    interval: AtomicInterval,
+
+    vote_packets_received: usize,
+    vote_packets_attempted_forward: usize,
+    vote_packets_forwarded: usize,
+
+    non_vote_packets_received: usize,
+    non_vote_packets_attempted_forward: usize,
+    non_vote_packets_forwarded: usize,
+}
+
+impl ForwardingStageMetrics {
+    fn maybe_report(&mut self) {
+        const REPORT_INTERVAL_MS: u64 = 5000;
+        if self.interval.should_update(REPORT_INTERVAL_MS) {
+            datapoint_info!(
+                "forwarding_stage",
+                (
+                    "vote_packets_received",
+                    core::mem::replace(&mut self.vote_packets_received, 0),
+                    i64
+                ),
+                (
+                    "vote_packets_attempted_forward",
+                    core::mem::replace(&mut self.vote_packets_attempted_forward, 0),
+                    i64
+                ),
+                (
+                    "vote_packets_forwarded",
+                    core::mem::replace(&mut self.vote_packets_forwarded, 0),
+                    i64
+                ),
+                (
+                    "non_vote_packets_received",
+                    core::mem::replace(&mut self.non_vote_packets_received, 0),
+                    i64
+                ),
+                (
+                    "non_vote_packets_attempted_forward",
+                    core::mem::replace(&mut self.non_vote_packets_attempted_forward, 0),
+                    i64
+                ),
+                (
+                    "non_vote_packets_forwarded",
+                    core::mem::replace(&mut self.non_vote_packets_forwarded, 0),
+                    i64
+                ),
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        solana_perf::packet::PacketBatch,
+        solana_sdk::packet::{Packet, PacketFlags},
+        std::time::Duration,
+    };
+
+    struct UnimplementedForwardAddressGetter;
+    impl ForwardAddressGetter for UnimplementedForwardAddressGetter {
+        fn get_forwarding_address(
+            &self,
+            _tpu_vote: bool,
+            _protocol: Protocol,
+        ) -> Option<SocketAddr> {
+            unimplemented!("UnimplementedForwardAddressGetter:get_forwarding_address")
+        }
+    }
+
+    struct DummyForwardAddressGetter {
+        vote_addr: SocketAddr,
+        non_vote_addr: SocketAddr,
+    }
+    impl ForwardAddressGetter for DummyForwardAddressGetter {
+        fn get_forwarding_address(
+            &self,
+            tpu_vote: bool,
+            _protocol: Protocol,
+        ) -> Option<SocketAddr> {
+            Some(if tpu_vote {
+                self.vote_addr
+            } else {
+                self.non_vote_addr
+            })
+        }
+    }
+
+    // Create simple dummy packets with different flags
+    // to filter out packets that should not be forwarded.
+    fn create_dummy_packet(num_bytes: usize, flags: PacketFlags) -> Packet {
+        let mut packet = Packet::default();
+        packet.meta_mut().size = num_bytes;
+        packet.meta_mut().flags = flags;
+        packet
+    }
+
+    #[test]
+    fn test_forward_batches() {
+        let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let addr = socket.local_addr().unwrap();
+
+        let (_sender, receiver) = crossbeam_channel::unbounded();
+        let dummy_forward_address_getter = UnimplementedForwardAddressGetter;
+        let connection_cache = ConnectionCache::with_udp("connection_cache_test", 1);
+
+        let mut forwarding_stage = ForwardingStage::new(
+            receiver,
+            dummy_forward_address_getter,
+            Arc::new(connection_cache),
+        );
+
+        const DUMMY_PACKET_SIZE: usize = 8;
+        let create_banking_packet_batch = |flags| {
+            BankingPacketBatch::new((
+                vec![PacketBatch::new(vec![create_dummy_packet(
+                    DUMMY_PACKET_SIZE,
+                    flags,
+                )])],
+                None,
+            ))
+        };
+
+        socket
+            .set_read_timeout(Some(Duration::from_millis(10)))
+            .unwrap();
+        let recv_buffer = &mut [0; 1024];
+
+        // Data Budget is 0, so no packets should be sent
+        for tpu_vote in [false, true] {
+            forwarding_stage.forward_batches(
+                create_banking_packet_batch(PacketFlags::FROM_STAKED_NODE),
+                tpu_vote,
+                addr,
+            );
+            assert!(socket.recv_from(recv_buffer).is_err());
+        }
+
+        for tpu_vote in [false, true] {
+            // Packet is valid, so it should be sent
+            forwarding_stage.update_data_budget();
+            forwarding_stage.forward_batches(
+                create_banking_packet_batch(PacketFlags::FROM_STAKED_NODE),
+                tpu_vote,
+                addr,
+            );
+            assert_eq!(socket.recv_from(recv_buffer).unwrap().0, DUMMY_PACKET_SIZE,);
+
+            // Packet is marked for discard, so it should not be sent
+            forwarding_stage.update_data_budget();
+            forwarding_stage.forward_batches(
+                create_banking_packet_batch(PacketFlags::FROM_STAKED_NODE | PacketFlags::DISCARD),
+                tpu_vote,
+                addr,
+            );
+            assert!(socket.recv_from(recv_buffer).is_err());
+
+            // Packet is not from staked node, so it should not be sent
+            forwarding_stage.update_data_budget();
+            forwarding_stage.forward_batches(
+                create_banking_packet_batch(PacketFlags::empty()),
+                tpu_vote,
+                addr,
+            );
+            assert!(socket.recv_from(recv_buffer).is_err());
+
+            // Packet is already forwarded, so it should not be sent
+            forwarding_stage.update_data_budget();
+            forwarding_stage.forward_batches(
+                create_banking_packet_batch(PacketFlags::FROM_STAKED_NODE | PacketFlags::FORWARDED),
+                tpu_vote,
+                addr,
+            );
+            assert!(socket.recv_from(recv_buffer).is_err());
+        }
+    }
+
+    #[test]
+    fn test_handle_batches() {
+        let vote_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let non_vote_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+
+        let (_sender, receiver) = crossbeam_channel::unbounded();
+        let dummy_forward_address_getter = DummyForwardAddressGetter {
+            vote_addr: vote_socket.local_addr().unwrap(),
+            non_vote_addr: non_vote_socket.local_addr().unwrap(),
+        };
+        let connection_cache = ConnectionCache::with_udp("connection_cache_test", 1);
+
+        let mut forwarding_stage = ForwardingStage::new(
+            receiver,
+            dummy_forward_address_getter,
+            Arc::new(connection_cache),
+        );
+
+        // packet sizes should be unique and paired with
+        // `expected_received` below so we can verify the correct packets
+        // were forwarded.
+        let banking_packet_batches = BankingPacketBatch::new((
+            vec![
+                PacketBatch::new(vec![
+                    create_dummy_packet(1, PacketFlags::empty()), // invalid
+                    create_dummy_packet(2, PacketFlags::FROM_STAKED_NODE), // valid
+                    create_dummy_packet(3, PacketFlags::DISCARD), // invalid
+                    create_dummy_packet(4, PacketFlags::FORWARDED), // invalid
+                ]),
+                PacketBatch::new(vec![
+                    create_dummy_packet(5, PacketFlags::FROM_STAKED_NODE), // valid
+                    create_dummy_packet(6, PacketFlags::DISCARD), // invalid
+                    create_dummy_packet(7, PacketFlags::FROM_STAKED_NODE), // valid
+                    create_dummy_packet(8, PacketFlags::FORWARDED), // invalid
+                    create_dummy_packet(9, PacketFlags::FROM_STAKED_NODE | PacketFlags::DISCARD), // invalid
+                ]),
+            ],
+            None,
+        ));
+        let expected_received = [2, 5, 7];
+
+        let recv_buffer = &mut [0; 1024];
+        for (is_tpu_vote, socket) in [(true, vote_socket), (false, non_vote_socket)] {
+            socket
+                .set_read_timeout(Some(Duration::from_millis(10)))
+                .unwrap();
+
+            forwarding_stage.handle_batches(banking_packet_batches.clone(), is_tpu_vote);
+
+            for expected_size in &expected_received {
+                assert_eq!(socket.recv_from(recv_buffer).unwrap().0, *expected_size);
+            }
+            assert!(socket.recv_from(recv_buffer).is_err()); // nothing more to receive
+        }
+    }
+}
diff --git a/core/src/lib.rs b/core/src/lib.rs
index c88488f0876667..9befe3b5f8b385 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -22,6 +22,7 @@ pub mod consensus;
 pub mod cost_update_service;
 pub mod drop_bank_service;
 pub mod fetch_stage;
+mod forwarding_stage;
 pub mod gen_keys;
 pub mod next_leader;
 pub mod optimistic_confirmation_verifier;
diff --git a/core/src/sigverify.rs b/core/src/sigverify.rs
index 18984ecc4ef836..fbd8be72c9c394 100644
--- a/core/src/sigverify.rs
+++ b/core/src/sigverify.rs
@@ -12,6 +12,7 @@ use {
         banking_trace::{BankingPacketBatch, BankingPacketSender},
         sigverify_stage::{SigVerifier, SigVerifyServiceError},
     },
+    crossbeam_channel::Sender,
     solana_perf::{cuda_runtime::PinnedVec, packet::PacketBatch, recycler::Recycler, sigverify},
     solana_sdk::{packet::Packet, saturating_add_assign},
 };
@@ -56,7 +57,8 @@ impl SigverifyTracerPacketStats {
 }
 
 pub struct TransactionSigVerifier {
-    packet_sender: BankingPacketSender,
+    banking_stage_sender: BankingPacketSender,
+    forward_stage_sender: Option<Sender<(BankingPacketBatch, bool)>>,
     tracer_packet_stats: SigverifyTracerPacketStats,
     recycler: Recycler<TxOffset>,
     recycler_out: Recycler<PinnedVec<u8>>,
@@ -64,16 +66,23 @@ pub struct TransactionSigVerifier {
 }
 
 impl TransactionSigVerifier {
-    pub fn new_reject_non_vote(packet_sender: BankingPacketSender) -> Self {
-        let mut new_self = Self::new(packet_sender);
+    pub fn new_reject_non_vote(
+        banking_stage_sender: BankingPacketSender,
+        forward_stage_sender: Option<Sender<(BankingPacketBatch, bool)>>,
+    ) -> Self {
+        let mut new_self = Self::new(banking_stage_sender, forward_stage_sender);
         new_self.reject_non_vote = true;
         new_self
     }
 
-    pub fn new(packet_sender: BankingPacketSender) -> Self {
+    pub fn new(
+        banking_stage_sender: BankingPacketSender,
+        forward_stage_sender: Option<Sender<(BankingPacketBatch, bool)>>,
+    ) -> Self {
         init();
         Self {
-            packet_sender,
+            banking_stage_sender,
+            forward_stage_sender,
             tracer_packet_stats: SigverifyTracerPacketStats::default(),
             recycler: Recycler::warmed(50, 4096),
             recycler_out: Recycler::warmed(50, 4096),
@@ -127,10 +136,15 @@ impl SigVerifier for TransactionSigVerifier {
         packet_batches: Vec<PacketBatch>,
     ) -> Result<(), SigVerifyServiceError<Self::SendType>> {
         let tracer_packet_stats_to_send = std::mem::take(&mut self.tracer_packet_stats);
-        self.packet_sender.send(BankingPacketBatch::new((
-            packet_batches,
-            Some(tracer_packet_stats_to_send),
-        )))?;
+        let banking_packet_batch =
+            BankingPacketBatch::new((packet_batches, Some(tracer_packet_stats_to_send)));
+        if let Some(forward_stage_sender) = &self.forward_stage_sender {
+            self.banking_stage_sender
+                .send(banking_packet_batch.clone())?;
+            let _ = forward_stage_sender.try_send((banking_packet_batch, self.reject_non_vote));
+        } else {
+            self.banking_stage_sender.send(banking_packet_batch)?;
+        }
 
         Ok(())
     }
diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs
index ac7d9889db0ed8..95fb29467527a4 100644
--- a/core/src/sigverify_stage.rs
+++ b/core/src/sigverify_stage.rs
@@ -548,7 +548,7 @@ mod tests {
         trace!("start");
         let (packet_s, packet_r) = unbounded();
         let (verified_s, verified_r) = BankingTracer::channel_for_test();
-        let verifier = TransactionSigVerifier::new(verified_s);
+        let verifier = TransactionSigVerifier::new(verified_s, None);
         let stage = SigVerifyStage::new(packet_r, verifier, "solSigVerTest", "test");
 
         let now = Instant::now();
diff --git a/core/src/tpu.rs b/core/src/tpu.rs
index 05982c9c3edf29..fa8ed37dcf3d00 100644
--- a/core/src/tpu.rs
+++ b/core/src/tpu.rs
@@ -11,6 +11,7 @@ use {
             VerifiedVoteSender, VoteTracker,
         },
         fetch_stage::FetchStage,
+        forwarding_stage::ForwardingStage,
         sigverify::TransactionSigVerifier,
         sigverify_stage::SigVerifyStage,
         staked_nodes_updater_service::StakedNodesUpdaterService,
@@ -18,7 +19,7 @@ use {
         validator::{BlockProductionMethod, GeneratorConfig},
     },
     bytes::Bytes,
-    crossbeam_channel::{unbounded, Receiver},
+    crossbeam_channel::{bounded, unbounded, Receiver},
     solana_client::connection_cache::ConnectionCache,
     solana_gossip::cluster_info::ClusterInfo,
     solana_ledger::{
@@ -71,6 +72,7 @@ pub struct Tpu {
     sigverify_stage: SigVerifyStage,
     vote_sigverify_stage: SigVerifyStage,
     banking_stage: BankingStage,
+    forwarding_stage: thread::JoinHandle<()>,
     cluster_info_vote_listener: ClusterInfoVoteListener,
     broadcast_stage: BroadcastStage,
    tpu_quic_t: thread::JoinHandle<()>,
@@ -199,15 +201,22 @@ impl Tpu {
         )
         .unwrap();
 
+        let (forward_stage_sender, forward_stage_receiver) = bounded(64);
         let sigverify_stage = {
-            let verifier = TransactionSigVerifier::new(non_vote_sender);
+            let verifier = TransactionSigVerifier::new(
+                non_vote_sender,
+                enable_block_production_forwarding.then(|| forward_stage_sender.clone()), // only forward non-vote transactions if enabled
+            );
             SigVerifyStage::new(packet_receiver, verifier, "solSigVerTpu", "tpu-verifier")
         };
 
         let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();
         let vote_sigverify_stage = {
-            let verifier = TransactionSigVerifier::new_reject_non_vote(tpu_vote_sender);
+            let verifier = TransactionSigVerifier::new_reject_non_vote(
+                tpu_vote_sender,
+                Some(forward_stage_sender),
+            );
             SigVerifyStage::new(
                 vote_packet_receiver,
                 verifier,
@@ -243,10 +252,14 @@ impl Tpu {
             transaction_status_sender,
             replay_vote_sender,
             log_messages_bytes_limit,
-            connection_cache.clone(),
             bank_forks.clone(),
             prioritization_fee_cache,
-            enable_block_production_forwarding,
+        );
+
+        let forwarding_stage = ForwardingStage::spawn(
+            forward_stage_receiver,
+            (cluster_info.clone(), poh_recorder.clone()),
+            connection_cache.clone(),
         );
+
         let (entry_receiver, tpu_entry_notifier) =
@@ -281,6 +294,7 @@ impl Tpu {
             sigverify_stage,
             vote_sigverify_stage,
             banking_stage,
+            forwarding_stage,
             cluster_info_vote_listener,
             broadcast_stage,
             tpu_quic_t,
@@ -300,6 +314,7 @@ impl Tpu {
             self.vote_sigverify_stage.join(),
             self.cluster_info_vote_listener.join(),
             self.banking_stage.join(),
+            self.forwarding_stage.join(),
             self.staked_nodes_updater_service.join(),
             self.tpu_quic_t.join(),
             self.tpu_forwards_quic_t.join(),
diff --git a/core/src/validator.rs b/core/src/validator.rs
index 280fd1c80bb7f3..c62bf0b9f4a8b1 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -278,7 +278,7 @@ pub struct ValidatorConfig {
     pub banking_trace_dir_byte_limit: banking_trace::DirByteLimit,
     pub block_verification_method: BlockVerificationMethod,
     pub block_production_method: BlockProductionMethod,
-    pub enable_block_production_forwarding: bool,
+    pub enable_non_vote_forwarding: bool,
     pub generator_config: Option<GeneratorConfig>,
     pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup,
     pub wen_restart_proto_path: Option<PathBuf>,
@@ -352,7 +352,7 @@ impl Default for ValidatorConfig {
             banking_trace_dir_byte_limit: 0,
             block_verification_method: BlockVerificationMethod::default(),
             block_production_method: BlockProductionMethod::default(),
-            enable_block_production_forwarding: false,
+            enable_non_vote_forwarding: false,
             generator_config: None,
             use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(),
             wen_restart_proto_path: None,
@@ -373,7 +373,7 @@ impl ValidatorConfig {
            enforce_ulimit_nofile: false,
             rpc_config: JsonRpcConfig::default_for_test(),
             block_production_method: BlockProductionMethod::default(),
-            enable_block_production_forwarding: true, // enable forwarding by default for tests
+            enable_non_vote_forwarding: true, // enable forwarding by default for tests
             replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
             replay_transactions_threads: NonZeroUsize::new(get_max_thread_count())
                 .expect("thread count is non-zero"),
@@ -1488,7 +1488,7 @@ impl Validator {
             tpu_max_connections_per_ipaddr_per_minute,
             &prioritization_fee_cache,
             config.block_production_method.clone(),
-            config.enable_block_production_forwarding,
+            config.enable_non_vote_forwarding,
             config.generator_config.clone(),
         );
 
diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs
index 786d2e39e57aa4..63f3482a942789 100644
--- a/local-cluster/src/validator_configs.rs
+++ b/local-cluster/src/validator_configs.rs
@@ -64,7 +64,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig {
         banking_trace_dir_byte_limit: config.banking_trace_dir_byte_limit,
         block_verification_method: config.block_verification_method.clone(),
         block_production_method: config.block_production_method.clone(),
-        enable_block_production_forwarding: config.enable_block_production_forwarding,
+        enable_non_vote_forwarding: config.enable_non_vote_forwarding,
         generator_config: config.generator_config.clone(),
         use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup,
         wen_restart_proto_path: config.wen_restart_proto_path.clone(),
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 74ad2d0926eae2..88252a796b4693 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1801,7 +1801,7 @@ pub fn main() {
         BlockProductionMethod
     )
     .unwrap_or_default();
-    validator_config.enable_block_production_forwarding = staked_nodes_overrides_path.is_some();
+    validator_config.enable_non_vote_forwarding = staked_nodes_overrides_path.is_some();
     validator_config.unified_scheduler_handler_threads =
         value_t!(matches, "unified_scheduler_handler_threads", usize).ok();