diff --git a/Cargo.lock b/Cargo.lock index dd37ca8a23..bbf975fa93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1444,15 +1444,15 @@ checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case", "proc-macro2 1.0.66", "quote 1.0.32", "rustc_version", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] @@ -5133,9 +5133,9 @@ checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" [[package]] name = "serde" -version = "1.0.183" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] @@ -5180,9 +5180,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", diff --git a/binary_port/src/binary_request.rs b/binary_port/src/binary_request.rs index da4d0fc10f..db8f53911f 100644 --- a/binary_port/src/binary_request.rs +++ b/binary_port/src/binary_request.rs @@ -243,7 +243,7 @@ impl TryFrom for BinaryRequestTag { 0 => Ok(BinaryRequestTag::Get), 1 => Ok(BinaryRequestTag::TryAcceptTransaction), 2 => Ok(BinaryRequestTag::TrySpeculativeExec), - _ => Err(InvalidBinaryRequestTag(value)), + _ => Err(InvalidBinaryRequestTag), } } } @@ -255,7 +255,7 @@ impl From for u8 { } /// Error raised when trying to convert an invalid u8 into a `BinaryRequestTag`. -pub struct InvalidBinaryRequestTag(u8); +pub struct InvalidBinaryRequestTag; #[cfg(test)] mod tests { diff --git a/binary_port/src/error_code.rs b/binary_port/src/error_code.rs index 22cad2e942..4b0e7b794d 100644 --- a/binary_port/src/error_code.rs +++ b/binary_port/src/error_code.rs @@ -271,9 +271,12 @@ pub enum ErrorCode { /// Invalid transaction kind #[error("invalid transaction kind")] InvalidTransactionInvalidTransactionKind = 84, + /// Gas price tolerance too low + #[error("gas price tolerance too low")] + GasPriceToleranceTooLow = 85, /// Received V1 Transaction for spec exec. #[error("received v1 transaction for speculative execution")] - ReceivedV1Transaction = 85, + ReceivedV1Transaction = 86, } impl TryFrom for ErrorCode { @@ -366,7 +369,8 @@ impl TryFrom for ErrorCode { 82 => Ok(ErrorCode::DeployMissingModuleBytes), 83 => Ok(ErrorCode::InvalidTransactionEntryPointCannotBeCall), 84 => Ok(ErrorCode::InvalidTransactionInvalidTransactionKind), - 85 => Ok(ErrorCode::ReceivedV1Transaction), + 85 => Ok(ErrorCode::GasPriceToleranceTooLow), + 86 => Ok(ErrorCode::ReceivedV1Transaction), _ => Err(UnknownErrorCode), } } @@ -437,6 +441,7 @@ impl From for ErrorCode { InvalidDeploy::UnableToCalculateGasCost => { ErrorCode::InvalidDeployUnableToCalculateGasCost } + InvalidDeploy::GasPriceToleranceTooLow { .. 
} => ErrorCode::GasPriceToleranceTooLow, _ => ErrorCode::InvalidDeployUnspecified, } } @@ -501,6 +506,9 @@ impl From for ErrorCode { InvalidTransactionV1::InvalidTransactionKind(_) => { ErrorCode::InvalidTransactionInvalidTransactionKind } + InvalidTransactionV1::GasPriceToleranceTooLow { .. } => { + ErrorCode::GasPriceToleranceTooLow + } _ => ErrorCode::InvalidTransactionUnspecified, } } diff --git a/execution_engine/src/runtime/mod.rs b/execution_engine/src/runtime/mod.rs index 035c828a95..e4ec17a4d1 100644 --- a/execution_engine/src/runtime/mod.rs +++ b/execution_engine/src/runtime/mod.rs @@ -809,7 +809,7 @@ where self.context.access_rights_extend(&urefs); { let transfers = self.context.transfers_mut(); - *transfers = mint_runtime.context.transfers().to_owned(); + mint_runtime.context.transfers().clone_into(transfers); } Ok(ret) } @@ -888,7 +888,7 @@ where self.context.access_rights_extend(&urefs); { let transfers = self.context.transfers_mut(); - *transfers = runtime.context.transfers().to_owned(); + runtime.context.transfers().clone_into(transfers); } Ok(ret) } @@ -1138,7 +1138,7 @@ where self.context.access_rights_extend(&urefs); { let transfers = self.context.transfers_mut(); - *transfers = runtime.context.transfers().to_owned(); + runtime.context.transfers().clone_into(transfers); } Ok(ret) @@ -1622,7 +1622,7 @@ where self.context .set_emit_message_cost(runtime.context.emit_message_cost()); let transfers = self.context.transfers_mut(); - *transfers = runtime.context.transfers().to_owned(); + runtime.context.transfers().clone_into(transfers); return match result { Ok(_) => { diff --git a/execution_engine/src/runtime_context/mod.rs b/execution_engine/src/runtime_context/mod.rs index 0cb0625322..1a62a3a5bb 100644 --- a/execution_engine/src/runtime_context/mod.rs +++ b/execution_engine/src/runtime_context/mod.rs @@ -1274,7 +1274,7 @@ where "Contract".to_string(), other.type_name(), ))), - None => Err(TrackingCopyError::KeyNotFound(key)).map_err(Into::into), + None => Err(TrackingCopyError::KeyNotFound(key).into()), }, } } diff --git a/execution_engine_testing/tests/Cargo.toml b/execution_engine_testing/tests/Cargo.toml index 047fe63e9e..5598ee5a4e 100644 --- a/execution_engine_testing/tests/Cargo.toml +++ b/execution_engine_testing/tests/Cargo.toml @@ -9,7 +9,7 @@ base16 = "0.2.1" casper-engine-test-support = { path = "../test_support" } casper-execution-engine = { path = "../../execution_engine", features = ["test-support"] } casper-storage = { path = "../../storage" } -casper-types = { path = "../../types", default_features = false, features = ["datasize", "json-schema"] } +casper-types = { path = "../../types", default-features = false, features = ["datasize", "json-schema"] } casper-wasm = "0.46.0" clap = "2" fs_extra = "1.2.0" diff --git a/execution_engine_testing/tests/src/test/check_transfer_success.rs b/execution_engine_testing/tests/src/test/check_transfer_success.rs index da90701b71..e865691ab9 100644 --- a/execution_engine_testing/tests/src/test/check_transfer_success.rs +++ b/execution_engine_testing/tests/src/test/check_transfer_success.rs @@ -1,4 +1,3 @@ -use core::convert::TryFrom; use std::path::PathBuf; use casper_engine_test_support::{ @@ -39,7 +38,7 @@ fn test_check_transfer_success_with_source_only() { ); // Doing a transfer from main purse to create new purse and store URef under NEW_PURSE_NAME. 
- let transfer_amount = U512::try_from(FIRST_TRANSFER_AMOUNT).expect("U512 from u64"); + let transfer_amount = U512::from(FIRST_TRANSFER_AMOUNT); let path = PathBuf::from(TRANSFER_WASM); let session_args = runtime_args! { ARG_DESTINATION => NEW_PURSE_NAME, @@ -100,9 +99,9 @@ fn test_check_transfer_success_with_source_only_errors() { ); // Doing a transfer from main purse to create new purse and store Uref under NEW_PURSE_NAME. - let transfer_amount = U512::try_from(FIRST_TRANSFER_AMOUNT).expect("U512 from u64"); + let transfer_amount = U512::from(FIRST_TRANSFER_AMOUNT); // Setup mismatch between transfer_amount performed and given to trigger assertion. - let wrong_transfer_amount = transfer_amount - U512::try_from(100u64).expect("U512 from 64"); + let wrong_transfer_amount = transfer_amount - U512::from(100u64); let path = PathBuf::from(TRANSFER_WASM); let session_args = runtime_args! { @@ -160,7 +159,7 @@ fn test_check_transfer_success_with_source_and_target() { DEFAULT_CHAINSPEC_REGISTRY.clone(), ); - let transfer_amount = U512::try_from(SECOND_TRANSFER_AMOUNT).expect("U512 from u64"); + let transfer_amount = U512::from(SECOND_TRANSFER_AMOUNT); // Doing a transfer from main purse to create new purse and store URef under NEW_PURSE_NAME. let path = PathBuf::from(TRANSFER_WASM); let session_args = runtime_args! { diff --git a/execution_engine_testing/tests/src/test/contract_messages.rs b/execution_engine_testing/tests/src/test/contract_messages.rs index ba48434730..a9b36f02a0 100644 --- a/execution_engine_testing/tests/src/test/contract_messages.rs +++ b/execution_engine_testing/tests/src/test/contract_messages.rs @@ -960,7 +960,7 @@ fn should_produce_per_block_message_ordering() { .get_last_exec_result() .unwrap() .messages() - .get(0) + .first() .unwrap() .block_index(), expected_index diff --git a/execution_engine_testing/tests/src/test/regression/ee_1120.rs b/execution_engine_testing/tests/src/test/regression/ee_1120.rs index 9447e0a121..258318d1ae 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1120.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1120.rs @@ -311,9 +311,9 @@ fn should_run_ee_1120_slash_delegators() { let unbond_purses_after: UnbondingPurses = builder.get_unbonds(); assert_ne!(unbond_purses_before, unbond_purses_after); - assert!(unbond_purses_after.get(&VALIDATOR_1_ADDR).is_none()); - assert!(unbond_purses_after.get(&DELEGATOR_1_ADDR).is_some()); - assert!(unbond_purses_after.get(&VALIDATOR_2_ADDR).is_some()); + assert!(!unbond_purses_after.contains_key(&VALIDATOR_1_ADDR)); + assert!(unbond_purses_after.contains_key(&DELEGATOR_1_ADDR)); + assert!(unbond_purses_after.contains_key(&VALIDATOR_2_ADDR)); // slash validator 1 to clear remaining bids and unbonding purses let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash( diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs b/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs index 7e26a1051f..5baf314141 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs @@ -171,7 +171,7 @@ fn should_run_successful_bond_and_unbond_and_slashing() { builder.exec(exec_request_5).expect_success().commit(); let unbond_purses: UnbondingPurses = builder.get_unbonds(); - assert!(unbond_purses.get(&*DEFAULT_ACCOUNT_ADDR).is_none()); + assert!(!unbond_purses.contains_key(&*DEFAULT_ACCOUNT_ADDR)); let bids = builder.get_bids(); 
assert!(bids.validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY).is_none()); @@ -460,7 +460,7 @@ fn should_run_successful_bond_and_unbond_with_release() { ); let unbond_purses: UnbondingPurses = builder.get_unbonds(); - assert!(unbond_purses.get(&*DEFAULT_ACCOUNT_ADDR).is_none()); + assert!(!unbond_purses.contains_key(&*DEFAULT_ACCOUNT_ADDR)); let bids = builder.get_bids(); assert!(!bids.is_empty()); @@ -650,7 +650,7 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { ); let unbond_purses: UnbondingPurses = builder.get_unbonds(); - assert!(unbond_purses.get(&*DEFAULT_ACCOUNT_ADDR).is_none()); + assert!(!unbond_purses.contains_key(&*DEFAULT_ACCOUNT_ADDR)); let bids = builder.get_bids(); assert!(!bids.is_empty()); diff --git a/node/src/cli/arglang.rs b/node/src/cli/arglang.rs index 78fcba8c56..2d39d7a5f8 100644 --- a/node/src/cli/arglang.rs +++ b/node/src/cli/arglang.rs @@ -130,45 +130,39 @@ fn parse_stream(tokens: &mut Peekable) -> Result where I: Iterator, { - loop { - match tokens.next() { - Some(Token::String(value)) => return Ok(Value::String(value)), - Some(Token::I64(value)) => return Ok(Value::Integer(value)), - Some(Token::Boolean(value)) => return Ok(Value::Boolean(value)), - Some(Token::OpenBracket) => { - // Special case for empty list. - if tokens.peek() == Some(&Token::CloseBracket) { - tokens.next(); - return Ok(Value::Array(Vec::new())); - } + match tokens.next() { + Some(Token::String(value)) => Ok(Value::String(value)), + Some(Token::I64(value)) => Ok(Value::Integer(value)), + Some(Token::Boolean(value)) => Ok(Value::Boolean(value)), + Some(Token::OpenBracket) => { + // Special case for empty list. + if tokens.peek() == Some(&Token::CloseBracket) { + tokens.next(); + return Ok(Value::Array(Vec::new())); + } - let mut items = Vec::new(); - loop { - items.push(parse_stream(tokens)?); + let mut items = Vec::new(); + loop { + items.push(parse_stream(tokens)?); - match tokens.next() { - Some(Token::CloseBracket) => { - return Ok(Value::Array(items)); - } - Some(Token::Comma) => { - // Continue parsing next time. - } - Some(t) => { - return Err(Error::UnexpectedToken(t)); - } - None => { - return Err(Error::UnexpectedEndOfInput); - } + match tokens.next() { + Some(Token::CloseBracket) => { + return Ok(Value::Array(items)); + } + Some(Token::Comma) => { + // Continue parsing next time. 
+ } + Some(t) => { + return Err(Error::UnexpectedToken(t)); + } + None => { + return Err(Error::UnexpectedEndOfInput); } } } - Some(t @ Token::CloseBracket) | Some(t @ Token::Comma) => { - return Err(Error::UnexpectedToken(t)); - } - None => { - return Err(Error::UnexpectedEndOfInput); - } } + Some(t @ Token::CloseBracket) | Some(t @ Token::Comma) => Err(Error::UnexpectedToken(t)), + None => Err(Error::UnexpectedEndOfInput), } } diff --git a/node/src/components.rs b/node/src/components.rs index 2750d8915d..a6db462083 100644 --- a/node/src/components.rs +++ b/node/src/components.rs @@ -155,10 +155,6 @@ pub(crate) trait InitializedComponent: Component { self.state() == &ComponentState::Uninitialized } - fn is_initialized(&self) -> bool { - self.state() == &ComponentState::Initialized - } - fn is_fatal(&self) -> bool { matches!(self.state(), ComponentState::Fatal(_)) } diff --git a/node/src/components/block_accumulator/tests.rs b/node/src/components/block_accumulator/tests.rs index 3ff9adcd73..21fd8d98be 100644 --- a/node/src/components/block_accumulator/tests.rs +++ b/node/src/components/block_accumulator/tests.rs @@ -2126,7 +2126,7 @@ async fn block_accumulator_doesnt_purge_with_delayed_block_execution() { // block can be delayed. Since we would purge an acceptor if the purge interval has passed, // we want to simulate a situation in which the purge interval was exceeded in order to test // the special case that if an acceptor that had sufficient finality, it is not purged. - tokio::time::sleep( + time::sleep( Duration::from(runner.reactor().block_accumulator.purge_interval) + Duration::from_secs(1), ) .await; diff --git a/node/src/components/block_synchronizer/execution_results_acquisition.rs b/node/src/components/block_synchronizer/execution_results_acquisition.rs index 02d6cd08e2..833575ed41 100644 --- a/node/src/components/block_synchronizer/execution_results_acquisition.rs +++ b/node/src/components/block_synchronizer/execution_results_acquisition.rs @@ -87,7 +87,7 @@ pub(crate) enum Error { } impl Display for Error { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Error::BlockHashMismatch { expected, actual } => { write!( diff --git a/node/src/components/block_synchronizer/global_state_synchronizer.rs b/node/src/components/block_synchronizer/global_state_synchronizer.rs index d3ac30061d..1e38057106 100644 --- a/node/src/components/block_synchronizer/global_state_synchronizer.rs +++ b/node/src/components/block_synchronizer/global_state_synchronizer.rs @@ -118,7 +118,7 @@ pub(crate) enum Event { result: PutTrieResult, }, #[from] - TrieAccumulatorEvent(TrieAccumulatorEvent), + TrieAccumulator(TrieAccumulatorEvent), } #[derive(Debug, DataSize)] @@ -651,8 +651,8 @@ where raw: trie_raw, result: put_trie_result, } => self.handle_put_trie_result(trie_raw.hash(), put_trie_result, effect_builder), - Event::TrieAccumulatorEvent(event) => reactor::wrap_effects( - Event::TrieAccumulatorEvent, + Event::TrieAccumulator(event) => reactor::wrap_effects( + Event::TrieAccumulator, self.trie_accumulator .handle_event(effect_builder, rng, event), ), diff --git a/node/src/components/block_synchronizer/signature_acquisition.rs b/node/src/components/block_synchronizer/signature_acquisition.rs index 371c7aa1f1..1be8ad9fab 100644 --- a/node/src/components/block_synchronizer/signature_acquisition.rs +++ b/node/src/components/block_synchronizer/signature_acquisition.rs @@ -205,7 +205,7 @@ mod tests { ); // Signature for the 
validator #0 weighting 1: - let (public_0, secret_0) = validators.get(0).unwrap(); + let (public_0, secret_0) = validators.first().unwrap(); let finality_signature = FinalitySignatureV2::create( block_hash, block_height, @@ -427,7 +427,7 @@ mod tests { ); // Set the validator #0 weighting 1 as pending: - let (public_0, secret_0) = validators.get(0).unwrap(); + let (public_0, secret_0) = validators.first().unwrap(); signature_acquisition.register_pending(public_0.clone()); assert_iter_equal!(signature_acquisition.have_signatures(), []); assert_iter_equal!(signature_acquisition.not_vacant(), [public_0]); diff --git a/node/src/components/block_synchronizer/tests.rs b/node/src/components/block_synchronizer/tests.rs index 83d619ffa1..6e97fb9254 100644 --- a/node/src/components/block_synchronizer/tests.rs +++ b/node/src/components/block_synchronizer/tests.rs @@ -42,21 +42,21 @@ const STRICT_FINALITY_REQUIRED_VERSION: ProtocolVersion = ProtocolVersion::from_ /// Event for the mock reactor. #[derive(Debug, From)] enum MockReactorEvent { - MarkBlockCompletedRequest(MarkBlockCompletedRequest), + MarkBlockCompletedRequest(#[allow(dead_code)] MarkBlockCompletedRequest), BlockFetcherRequest(FetcherRequest), BlockHeaderFetcherRequest(FetcherRequest), LegacyDeployFetcherRequest(FetcherRequest), TransactionFetcherRequest(FetcherRequest), FinalitySignatureFetcherRequest(FetcherRequest), - TrieOrChunkFetcherRequest(FetcherRequest), + TrieOrChunkFetcherRequest(#[allow(dead_code)] FetcherRequest), BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest), - SyncLeapFetcherRequest(FetcherRequest), + SyncLeapFetcherRequest(#[allow(dead_code)] FetcherRequest), ApprovalsHashesFetcherRequest(FetcherRequest), NetworkInfoRequest(NetworkInfoRequest), BlockAccumulatorRequest(BlockAccumulatorRequest), - PeerBehaviorAnnouncement(PeerBehaviorAnnouncement), + PeerBehaviorAnnouncement(#[allow(dead_code)] PeerBehaviorAnnouncement), StorageRequest(StorageRequest), - TrieAccumulatorRequest(TrieAccumulatorRequest), + TrieAccumulatorRequest(#[allow(dead_code)] TrieAccumulatorRequest), ContractRuntimeRequest(ContractRuntimeRequest), SyncGlobalStateRequest(SyncGlobalStateRequest), MakeBlockExecutableRequest(MakeBlockExecutableRequest), @@ -275,7 +275,7 @@ impl BlockSynchronizer { Arc::new(Chainspec::random(rng)), MAX_SIMULTANEOUS_PEERS, validator_matrix, - &prometheus::Registry::new(), + &Registry::new(), ) .expect("Failed to create BlockSynchronizer"); @@ -2445,7 +2445,7 @@ async fn historical_sync_skips_exec_results_and_deploys_if_block_empty() { Event::GlobalStateSynced { block_hash: *block.hash(), result: Ok(GlobalStateSynchronizerResponse::new( - super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + global_state_synchronizer::RootHash::new(*block.state_root_hash()), vec![], )), }, @@ -2552,7 +2552,7 @@ async fn historical_sync_no_legacy_block() { Event::GlobalStateSynced { block_hash: *block.hash(), result: Ok(GlobalStateSynchronizerResponse::new( - super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + global_state_synchronizer::RootHash::new(*block.state_root_hash()), vec![], )), }, @@ -2780,7 +2780,7 @@ async fn historical_sync_legacy_block_strict_finality() { Event::GlobalStateSynced { block_hash: *block.hash(), result: Ok(GlobalStateSynchronizerResponse::new( - super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + global_state_synchronizer::RootHash::new(*block.state_root_hash()), vec![], )), }, @@ -2982,7 +2982,7 @@ async fn 
historical_sync_legacy_block_weak_finality() { Event::GlobalStateSynced { block_hash: *block.hash(), result: Ok(GlobalStateSynchronizerResponse::new( - super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + global_state_synchronizer::RootHash::new(*block.state_root_hash()), vec![], )), }, @@ -3195,7 +3195,7 @@ async fn historical_sync_legacy_block_any_finality() { Event::GlobalStateSynced { block_hash: *block.hash(), result: Ok(GlobalStateSynchronizerResponse::new( - super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + global_state_synchronizer::RootHash::new(*block.state_root_hash()), vec![], )), }, @@ -3865,7 +3865,7 @@ async fn historical_sync_latch_should_not_decrement_for_old_deploy_fetch_respons Event::GlobalStateSynced { block_hash: *block.hash(), result: Ok(GlobalStateSynchronizerResponse::new( - super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + global_state_synchronizer::RootHash::new(*block.state_root_hash()), vec![], )), }, @@ -4136,7 +4136,7 @@ async fn historical_sync_latch_should_not_decrement_for_old_execution_results() Event::GlobalStateSynced { block_hash: *block.hash(), result: Ok(GlobalStateSynchronizerResponse::new( - super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + global_state_synchronizer::RootHash::new(*block.state_root_hash()), vec![], )), }, diff --git a/node/src/components/block_synchronizer/trie_accumulator/tests.rs b/node/src/components/block_synchronizer/trie_accumulator/tests.rs index 05d13d80ef..e957acd156 100644 --- a/node/src/components/block_synchronizer/trie_accumulator/tests.rs +++ b/node/src/components/block_synchronizer/trie_accumulator/tests.rs @@ -12,7 +12,7 @@ use futures::channel::oneshot; #[derive(Debug)] enum ReactorEvent { FetcherRequest(FetcherRequest), - PeerBehaviorAnnouncement(PeerBehaviorAnnouncement), + PeerBehaviorAnnouncement(#[allow(dead_code)] PeerBehaviorAnnouncement), } impl From for ReactorEvent { diff --git a/node/src/components/block_validator/tests.rs b/node/src/components/block_validator/tests.rs index 5f580f5343..663eafb81e 100644 --- a/node/src/components/block_validator/tests.rs +++ b/node/src/components/block_validator/tests.rs @@ -38,7 +38,7 @@ enum ReactorEvent { #[from] Storage(StorageRequest), #[from] - FatalAnnouncement(FatalAnnouncement), + FatalAnnouncement(#[allow(dead_code)] FatalAnnouncement), } impl From for ReactorEvent { @@ -148,34 +148,13 @@ pub(super) fn new_proposed_block_with_cited_signatures( let block_context = BlockContext::new(timestamp, vec![]); let transactions = { let mut ret = BTreeMap::new(); - ret.insert( - MINT_LANE_ID, - transfer - .into_iter() - .map(|(txn_hash, approvals)| (txn_hash, approvals)) - .collect(), - ); - ret.insert( - AUCTION_LANE_ID, - staking - .into_iter() - .map(|(txn_hash, approvals)| (txn_hash, approvals)) - .collect(), - ); + ret.insert(MINT_LANE_ID, transfer.into_iter().collect()); + ret.insert(AUCTION_LANE_ID, staking.into_iter().collect()); ret.insert( INSTALL_UPGRADE_LANE_ID, - install_upgrade - .into_iter() - .map(|(txn_hash, approvals)| (txn_hash, approvals)) - .collect(), - ); - ret.insert( - LARGE_LANE_ID, - standard - .into_iter() - .map(|(txn_hash, approvals)| (txn_hash, approvals)) - .collect(), + install_upgrade.into_iter().collect(), ); + ret.insert(LARGE_LANE_ID, standard.into_iter().collect()); ret }; let block_payload = BlockPayload::new(transactions, vec![], cited_signatures, true); diff --git a/node/src/components/consensus.rs 
b/node/src/components/consensus.rs index c553cf9d52..0062386ed6 100644 --- a/node/src/components/consensus.rs +++ b/node/src/components/consensus.rs @@ -183,7 +183,7 @@ pub(crate) enum Event { } impl Debug for ConsensusMessage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { ConsensusMessage::Protocol { era_id, payload: _ } => { write!(f, "Protocol {{ era_id: {:?}, .. }}", era_id) @@ -218,7 +218,7 @@ impl Display for ConsensusMessage { } impl Debug for ConsensusRequestMessage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, "ConsensusRequestMessage {{ era_id: {:?}, .. }}", diff --git a/node/src/components/consensus/era_supervisor.rs b/node/src/components/consensus/era_supervisor.rs index 4b4efc6dd1..29b3febac3 100644 --- a/node/src/components/consensus/era_supervisor.rs +++ b/node/src/components/consensus/era_supervisor.rs @@ -142,7 +142,7 @@ impl EraSupervisor { registry: &Registry, ) -> Result { let unit_files_folder = storage_dir.join("unit_files"); - std::fs::create_dir_all(&unit_files_folder)?; + fs::create_dir_all(&unit_files_folder)?; info!(our_id = %validator_matrix.public_signing_key(), "EraSupervisor pubkey",); let metrics = Metrics::new(registry)?; diff --git a/node/src/components/consensus/highway_core/highway_testing.rs b/node/src/components/consensus/highway_core/highway_testing.rs index 97a96bf494..d41b533199 100644 --- a/node/src/components/consensus/highway_core/highway_testing.rs +++ b/node/src/components/consensus/highway_core/highway_testing.rs @@ -71,7 +71,7 @@ enum HighwayMessage { } impl Debug for HighwayMessage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { HighwayMessage::Timer(t) => f.debug_tuple("Timer").field(&t.millis()).finish(), HighwayMessage::RequestBlock(bc) => f @@ -153,7 +153,7 @@ pub(crate) enum TestRunError { } impl Display for TestRunError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { TestRunError::NoMessages => write!( f, @@ -1024,7 +1024,7 @@ impl Debug for HashWrapper { } impl Display for HashWrapper { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { Debug::fmt(self, f) } } @@ -1139,8 +1139,8 @@ mod test_harness { ( v.finalized_values().cloned().collect::>(), v.messages_produced() + .filter(|&hwm| hwm.is_new_unit()) .cloned() - .filter(|hwm| hwm.is_new_unit()) .count(), ) }) diff --git a/node/src/components/consensus/protocols/highway/tests.rs b/node/src/components/consensus/protocols/highway/tests.rs index 3ff64c52ff..2f3b17c009 100644 --- a/node/src/components/consensus/protocols/highway/tests.rs +++ b/node/src/components/consensus/protocols/highway/tests.rs @@ -48,7 +48,7 @@ where highway_testing::TEST_ENDORSEMENT_EVIDENCE_LIMIT, ); let weights = weights.into_iter().map(|w| w.into()).collect::>(); - state::State::new(weights, params, vec![], vec![]) + State::new(weights, params, vec![], vec![]) } const INSTANCE_ID_DATA: &[u8; 1] = &[123u8; 1]; diff --git a/node/src/components/consensus/protocols/zug/des_testing.rs b/node/src/components/consensus/protocols/zug/des_testing.rs index e21938820e..4726c05ad9 100644 --- a/node/src/components/consensus/protocols/zug/des_testing.rs +++ b/node/src/components/consensus/protocols/zug/des_testing.rs @@ -166,7 
+166,7 @@ pub(crate) enum TestRunError { } impl Display for TestRunError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { TestRunError::NoMessages => write!( f, @@ -999,7 +999,7 @@ impl Debug for HashWrapper { } impl Display for HashWrapper { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { Debug::fmt(self, f) } } @@ -1112,8 +1112,8 @@ mod test_harness { ( v.finalized_values().cloned().collect::>(), v.messages_produced() + .filter(|&zm| zm.is_signed_gossip_message() || zm.is_proposal()) .cloned() - .filter(|zm| zm.is_signed_gossip_message() || zm.is_proposal()) .count(), ) }) diff --git a/node/src/components/consensus/protocols/zug/wal.rs b/node/src/components/consensus/protocols/zug/wal.rs index f529d4bc98..70fa606eeb 100644 --- a/node/src/components/consensus/protocols/zug/wal.rs +++ b/node/src/components/consensus/protocols/zug/wal.rs @@ -115,6 +115,7 @@ impl ReadWal { pub(crate) fn new(wal_path: &PathBuf) -> Result { let file = OpenOptions::new() .create(true) + .truncate(false) .read(true) .write(true) .open(wal_path) diff --git a/node/src/components/contract_runtime/exec_queue.rs b/node/src/components/contract_runtime/exec_queue.rs index 95efd19f31..e843b59ec1 100644 --- a/node/src/components/contract_runtime/exec_queue.rs +++ b/node/src/components/contract_runtime/exec_queue.rs @@ -44,7 +44,7 @@ impl ExecQueue { *locked_queue = locked_queue.split_off(&height); - core::convert::TryInto::try_into(locked_queue.len()).unwrap_or(i64::MIN) + TryInto::try_into(locked_queue.len()).unwrap_or(i64::MIN) } } diff --git a/node/src/components/contract_runtime/utils.rs b/node/src/components/contract_runtime/utils.rs index 53797185a4..cfc35eaa47 100644 --- a/node/src/components/contract_runtime/utils.rs +++ b/node/src/components/contract_runtime/utils.rs @@ -246,12 +246,7 @@ pub(super) async fn exec_or_requeue( None }; - let BlockAndExecutionArtifacts { - block, - approvals_hashes, - execution_artifacts, - step_outcome: maybe_step_outcome, - } = match run_intensive_task(move || { + let task = move || { debug!("ContractRuntime: execute_finalized_block"); execute_finalized_block( data_access_layer.as_ref(), @@ -265,9 +260,13 @@ pub(super) async fn exec_or_requeue( maybe_next_era_gas_price, last_switch_block_hash, ) - }) - .await - { + }; + let BlockAndExecutionArtifacts { + block, + approvals_hashes, + execution_artifacts, + step_outcome: maybe_step_outcome, + } = match run_intensive_task(task).await { Ok(ret) => ret, Err(error) => { error!(%error, "failed to execute block"); diff --git a/node/src/components/diagnostics_port/tasks.rs b/node/src/components/diagnostics_port/tasks.rs index 0e3c34461b..ee6136c482 100644 --- a/node/src/components/diagnostics_port/tasks.rs +++ b/node/src/components/diagnostics_port/tasks.rs @@ -395,7 +395,7 @@ impl Session { let tempdir = tempfile::tempdir().map_err(ObtainDumpError::CreateTempDir)?; let tempfile_path = tempdir.path().join("queue-dump"); - let tempfile = fs::File::create(&tempfile_path).map_err(ObtainDumpError::CreateTempFile)?; + let tempfile = File::create(&tempfile_path).map_err(ObtainDumpError::CreateTempFile)?; effect_builder .diagnostics_port_dump_queue(self.create_queue_dump_format(tempfile)) @@ -403,7 +403,7 @@ impl Session { // We can now reopen the file and return it. 
let reopened_tempfile = - fs::File::open(tempfile_path).map_err(ObtainDumpError::ReopenTempFile)?; + File::open(tempfile_path).map_err(ObtainDumpError::ReopenTempFile)?; Ok(reopened_tempfile) } diff --git a/node/src/components/gossiper/gossip_table.rs b/node/src/components/gossiper/gossip_table.rs index 1e46b01770..7b09cba5dc 100644 --- a/node/src/components/gossiper/gossip_table.rs +++ b/node/src/components/gossiper/gossip_table.rs @@ -673,14 +673,14 @@ mod tests { // Check same complete data from other source causes `Noop` to be returned since we still // have all gossip requests in flight. Check it updates holders. - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[0])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0])); let action = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All); assert_eq!(GossipAction::Noop, action); check_holders(&node_ids[..1], &gossip_table, &data_id); // Check receiving a gossip response, causes `ShouldGossip` to be returned and holders // updated. - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[1])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[1])); let action = gossip_table.already_infected(&data_id, node_ids[1]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, @@ -691,7 +691,7 @@ mod tests { assert_eq!(expected, action); check_holders(&node_ids[..2], &gossip_table, &data_id); - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[2])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[2])); let action = gossip_table.new_complete_data(&data_id, Some(node_ids[2]), GossipTarget::All); assert_eq!(GossipAction::Noop, action); check_holders(&node_ids[..3], &gossip_table, &data_id); @@ -700,7 +700,7 @@ mod tests { // causes `Noop` to be returned and holders cleared. let limit = 3 + EXPECTED_DEFAULT_INFECTION_TARGET; for node_id in &node_ids[3..limit] { - gossip_table.register_infection_attempt(&data_id, std::iter::once(node_id)); + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let _ = gossip_table.we_infected(&data_id, *node_id); } let action = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); @@ -739,7 +739,7 @@ mod tests { let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); let limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1; for node_id in node_ids.iter().take(limit) { - gossip_table.register_infection_attempt(&data_id, std::iter::once(node_id)); + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let action = gossip_table.we_infected(&data_id, *node_id); assert_eq!(GossipAction::Noop, action); assert!(!gossip_table.finished.contains(&data_id)); @@ -747,7 +747,7 @@ mod tests { // Check recording an infection from an already-recorded infectee doesn't cause us to stop // gossiping. - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[limit - 1])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit - 1])); let action = gossip_table.we_infected(&data_id, node_ids[limit - 1]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, @@ -759,7 +759,7 @@ mod tests { assert!(!gossip_table.finished.contains(&data_id)); // Check third new infection does cause us to stop gossiping. 
- gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[limit])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit])); let action = gossip_table.we_infected(&data_id, node_ids[limit]); assert_eq!(GossipAction::AnnounceFinished, action); assert!(gossip_table.finished.contains(&data_id)); @@ -818,7 +818,7 @@ mod tests { let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1; for node_id in node_ids.iter().take(limit) { let _ = gossip_table.new_complete_data(&data_id, Some(*node_id), GossipTarget::All); - gossip_table.register_infection_attempt(&data_id, std::iter::once(node_id)); + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); assert!(!gossip_table.finished.contains(&data_id)); } @@ -826,7 +826,7 @@ mod tests { // `finished` collection. gossip_table.register_infection_attempt( &data_id, - std::iter::once(&node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT]), + iter::once(&node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT]), ); let action = gossip_table.check_timeout( &data_id, @@ -875,7 +875,7 @@ mod tests { let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1; for (index, node_id) in node_ids.iter().enumerate().take(limit) { - gossip_table.register_infection_attempt(&data_id, std::iter::once(node_id)); + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let action = gossip_table.already_infected(&data_id, *node_id); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, @@ -888,7 +888,7 @@ mod tests { // Check recording a non-infection from an already-recorded holder doesn't cause us to stop // gossiping. - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[0])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0])); let action = gossip_table.already_infected(&data_id, node_ids[0]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, @@ -899,7 +899,7 @@ mod tests { assert_eq!(expected, action); // Check 15th non-infection does cause us to stop gossiping. - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[limit])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit])); let action = gossip_table.we_infected(&data_id, node_ids[limit]); assert_eq!(GossipAction::AnnounceFinished, action); } @@ -917,19 +917,19 @@ mod tests { let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); let infection_limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1; for node_id in &node_ids[0..infection_limit] { - gossip_table.register_infection_attempt(&data_id, std::iter::once(node_id)); + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let _ = gossip_table.we_infected(&data_id, *node_id); } let attempted_to_infect = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 2; for node_id in &node_ids[infection_limit..attempted_to_infect] { - gossip_table.register_infection_attempt(&data_id, std::iter::once(node_id)); + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let _ = gossip_table.already_infected(&data_id, *node_id); } // Check adding 12th non-infection doesn't cause us to stop gossiping. 
gossip_table - .register_infection_attempt(&data_id, std::iter::once(&node_ids[attempted_to_infect])); + .register_infection_attempt(&data_id, iter::once(&node_ids[attempted_to_infect])); let action = gossip_table.already_infected(&data_id, node_ids[attempted_to_infect]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, @@ -958,11 +958,11 @@ mod tests { // check_timeout for node 0 should return Noop, and for node 1 it should represent a timed // out response and return ShouldGossip. - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[0])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0])); let action = gossip_table.check_timeout(&data_id, node_ids[0]); assert_eq!(GossipAction::Noop, action); - gossip_table.register_infection_attempt(&data_id, std::iter::once(&node_ids[1])); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[1])); let action = gossip_table.check_timeout(&data_id, node_ids[1]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, diff --git a/node/src/components/storage/object_pool.rs b/node/src/components/storage/object_pool.rs index 46f70cab39..03d09ca10f 100644 --- a/node/src/components/storage/object_pool.rs +++ b/node/src/components/storage/object_pool.rs @@ -101,10 +101,10 @@ where } /// Retrieves an object from the pool, if present. - pub(super) fn get(&self, id: &Q) -> Option> + pub(super) fn get(&self, id: &Q) -> Option> where I: Borrow, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { self.items.get(id).and_then(Weak::upgrade) } diff --git a/node/src/components/storage/tests.rs b/node/src/components/storage/tests.rs index ed04c1f8fd..962ea3979c 100644 --- a/node/src/components/storage/tests.rs +++ b/node/src/components/storage/tests.rs @@ -2278,7 +2278,7 @@ fn check_force_resync_with_marker_file() { ); drop(storage); // Remove the marker file. - std::fs::remove_file(&force_resync_file_path).unwrap(); + fs::remove_file(&force_resync_file_path).unwrap(); assert!(!force_resync_file_path.exists()); // Reinitialize storage with force resync enabled. diff --git a/node/src/components/sync_leaper/leap_activity.rs b/node/src/components/sync_leaper/leap_activity.rs index 5c04b51d26..988674ddbf 100644 --- a/node/src/components/sync_leaper/leap_activity.rs +++ b/node/src/components/sync_leaper/leap_activity.rs @@ -216,7 +216,7 @@ mod tests { let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap(); assert!(!actual_peers.is_empty()); - assert_eq!(actual_peers.get(0).unwrap(), &peer_1.0); + assert_eq!(actual_peers.first().unwrap(), &peer_1.0); assert_eq!(actual_sync_leap, sync_leap); // Adding peers in other states does not change the result. @@ -232,7 +232,7 @@ mod tests { let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap(); assert_eq!(actual_peers.len(), 1); - assert_eq!(actual_peers.get(0).unwrap(), &peer_1.0); + assert_eq!(actual_peers.first().unwrap(), &peer_1.0); assert_eq!(actual_sync_leap, sync_leap); } @@ -276,7 +276,7 @@ mod tests { // Expect only a single peer with the best sync leap. assert_eq!(actual_peers.len(), 1); - assert_eq!(actual_peers.get(0).unwrap(), &peer_1_best_node_id); + assert_eq!(actual_peers.first().unwrap(), &peer_1_best_node_id); assert_eq!(actual_sync_leap, best_sync_leap); // Add two more peers with even better response. 
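A standalone sketch, not part of the patch above: it illustrates the lookup-bound relaxation made in node/src/components/storage/object_pool.rs, where `get` gains `Q: Hash + Eq + ?Sized` so a pool keyed by an owned type can be queried with a borrowed, unsized key (e.g. `&str` for a `String` key). `SimplePool` and the `Vec<u8>` payload are simplified stand-ins, not the node's actual types.

use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::{Arc, Weak};

struct SimplePool<I> {
    // Weak references, as in the node's object pool: entries expire when the
    // last strong reference elsewhere is dropped.
    items: HashMap<I, Weak<Vec<u8>>>,
}

impl<I: Hash + Eq> SimplePool<I> {
    fn get<Q>(&self, id: &Q) -> Option<Arc<Vec<u8>>>
    where
        I: Borrow<Q>,
        Q: Hash + Eq + ?Sized, // `?Sized` permits unsized key types such as `str`
    {
        self.items.get(id).and_then(Weak::upgrade)
    }
}

fn main() {
    let stored = Arc::new(vec![1u8, 2, 3]);
    let mut items = HashMap::new();
    items.insert("block-1".to_string(), Arc::downgrade(&stored));
    let pool = SimplePool { items };

    // Without the `?Sized` bound this call would only accept `&String`, not `&str`.
    assert!(pool.get("block-1").is_some());
    assert!(pool.get("missing").is_none());
}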
diff --git a/node/src/components/transaction_acceptor/tests.rs b/node/src/components/transaction_acceptor/tests.rs index 0d2081f16a..c588c9cdaa 100644 --- a/node/src/components/transaction_acceptor/tests.rs +++ b/node/src/components/transaction_acceptor/tests.rs @@ -212,6 +212,8 @@ enum TestScenario { DeployWithoutTransferAmount, BalanceCheckForDeploySentByPeer, InvalidPricingModeForTransactionV1, + TooLowGasPriceToleranceForTransactionV1, + TooLowGasPriceToleranceForDeploy, } impl TestScenario { @@ -252,7 +254,9 @@ impl TestScenario { | TestScenario::FromClientSignedByAdmin(_) | TestScenario::DeployWithEmptySessionModuleBytes | TestScenario::DeployWithNativeTransferInPayment - | TestScenario::InvalidPricingModeForTransactionV1 => Source::Client, + | TestScenario::InvalidPricingModeForTransactionV1 + | TestScenario::TooLowGasPriceToleranceForTransactionV1 + | TestScenario::TooLowGasPriceToleranceForDeploy => Source::Client, } } @@ -566,6 +570,24 @@ impl TestScenario { .expect("must create classic mode transaction"); Transaction::from(classic_mode_transaction) } + TestScenario::TooLowGasPriceToleranceForTransactionV1 => { + const TOO_LOW_GAS_PRICE_TOLERANCE: u8 = 0; + + let fixed_mode_transaction = TransactionV1Builder::new_random(rng) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: TOO_LOW_GAS_PRICE_TOLERANCE, + }) + .with_chain_name("casper-example") + .build() + .expect("must create fixed mode transaction"); + Transaction::from(fixed_mode_transaction) + } + TestScenario::TooLowGasPriceToleranceForDeploy => { + const TOO_LOW_GAS_PRICE_TOLERANCE: u64 = 0; + + let deploy = Deploy::random_with_gas_price(rng, TOO_LOW_GAS_PRICE_TOLERANCE); + Transaction::from(deploy) + } } } @@ -620,6 +642,8 @@ impl TestScenario { } } TestScenario::InvalidPricingModeForTransactionV1 => false, + TestScenario::TooLowGasPriceToleranceForTransactionV1 => false, + TestScenario::TooLowGasPriceToleranceForDeploy => false, } } @@ -1156,7 +1180,9 @@ async fn run_transaction_acceptor_without_timeout( | TestScenario::DeployWithoutTransferTarget | TestScenario::DeployWithoutTransferAmount | TestScenario::InvalidPricingModeForTransactionV1 - | TestScenario::FromClientExpired(_) => { + | TestScenario::FromClientExpired(_) + | TestScenario::TooLowGasPriceToleranceForTransactionV1 + | TestScenario::TooLowGasPriceToleranceForDeploy => { matches!( event, Event::TransactionAcceptorAnnouncement( @@ -2380,3 +2406,27 @@ async fn should_reject_transaction_v1_with_invalid_pricing_mode() { ))) )) } + +#[tokio::test] +async fn should_reject_transaction_v1_with_too_low_gas_price_tolerance() { + let test_scenario = TestScenario::TooLowGasPriceToleranceForTransactionV1; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::GasPriceToleranceTooLow { .. } + ))) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_too_low_gas_price_tolerance() { + let test_scenario = TestScenario::TooLowGasPriceToleranceForDeploy; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::GasPriceToleranceTooLow { .. 
}) + )) + )) +} diff --git a/node/src/components/transaction_buffer/tests.rs b/node/src/components/transaction_buffer/tests.rs index ac05429a91..4119141304 100644 --- a/node/src/components/transaction_buffer/tests.rs +++ b/node/src/components/transaction_buffer/tests.rs @@ -368,7 +368,7 @@ fn get_appendable_block_when_transfers_are_of_one_category() { get_appendable_block( &mut rng, &mut transaction_buffer, - std::iter::repeat_with(|| MINT_LANE_ID), + iter::repeat_with(|| MINT_LANE_ID), transaction_config .transaction_v1_config .get_max_transaction_count(MINT_LANE_ID) as usize @@ -439,7 +439,7 @@ fn get_appendable_block_when_standards_are_of_one_category() { get_appendable_block( &mut rng, &mut transaction_buffer, - std::iter::repeat_with(|| large_lane_id), + iter::repeat_with(|| large_lane_id), transaction_config .transaction_v1_config .get_max_transaction_count(large_lane_id) as usize @@ -828,12 +828,12 @@ fn register_transactions_and_blocks() { // try to register held transactions again. let mut held_transactions = valid_transactions .iter() - .cloned() - .filter(|transaction| { + .filter(|&transaction| { appendable_block .transaction_hashes() .contains(&transaction.hash()) }) + .cloned() .peekable(); assert!(held_transactions.peek().is_some()); held_transactions.for_each(|transaction| transaction_buffer.register_transaction(transaction)); @@ -847,7 +847,7 @@ fn register_transactions_and_blocks() { // test if transactions held for proposed blocks which did not get finalized in time // are eligible again let count = rng.gen_range(1..11); - let txns: Vec<_> = std::iter::repeat_with(|| Transaction::Deploy(Deploy::random(&mut rng))) + let txns: Vec<_> = iter::repeat_with(|| Transaction::Deploy(Deploy::random(&mut rng))) .take(count) .collect(); let block = FinalizedBlock::random_with_specifics( @@ -871,7 +871,7 @@ fn register_transactions_and_blocks() { #[derive(Debug)] enum ReactorEvent { TransactionBufferAnnouncement(TransactionBufferAnnouncement), - Event(Event), + Event(#[allow(dead_code)] Event), } impl From for ReactorEvent { @@ -1401,7 +1401,7 @@ fn register_random_deploys_unique_hashes( num_deploys: usize, rng: &mut TestRng, ) { - let deploys = std::iter::repeat_with(|| { + let deploys = iter::repeat_with(|| { let name = format!("{}", rng.gen::()); let call = format!("{}", rng.gen::()); Deploy::random_contract_by_name( @@ -1424,7 +1424,7 @@ fn register_random_deploys_same_hash( num_deploys: usize, rng: &mut TestRng, ) { - let deploys = std::iter::repeat_with(|| { + let deploys = iter::repeat_with(|| { let name = "test".to_owned(); let call = "test".to_owned(); Deploy::random_contract_by_name( diff --git a/node/src/components/upgrade_watcher.rs b/node/src/components/upgrade_watcher.rs index aaeddd05ea..0169f1d591 100644 --- a/node/src/components/upgrade_watcher.rs +++ b/node/src/components/upgrade_watcher.rs @@ -329,7 +329,7 @@ struct UpgradePoint { impl UpgradePoint { /// Parses a chainspec file at the given path as an `UpgradePoint`. - fn from_chainspec_path + std::fmt::Debug>(path: P) -> Result { + fn from_chainspec_path + fmt::Debug>(path: P) -> Result { let bytes = file_utils::read_file(path.as_ref().join(CHAINSPEC_FILENAME)) .map_err(Error::LoadUpgradePoint)?; Ok(toml::from_str(std::str::from_utf8(&bytes).unwrap())?) 
@@ -349,8 +349,7 @@ fn next_installed_version( dir: &Path, current_version: ProtocolVersion, ) -> Result { - let max_version = - ProtocolVersion::from_parts(u32::max_value(), u32::max_value(), u32::max_value()); + let max_version = ProtocolVersion::from_parts(u32::MAX, u32::MAX, u32::MAX); let mut next_version = max_version; let mut read_version = false; diff --git a/node/src/lib.rs b/node/src/lib.rs index a2922d1913..d28c365039 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -145,10 +145,7 @@ mod tests { let (version, sha) = prefix.split_once('-').unwrap_or((prefix, "")); assert_eq!(version, env!("CARGO_PKG_VERSION")); - assert_eq!( - sha, - std::env::var("NODE_GIT_SHA").unwrap_or_default().as_str() - ); + assert_eq!(sha, env::var("NODE_GIT_SHA").unwrap_or_default().as_str()); if env!("NODE_BUILD_PROFILE") == "release" { assert_eq!(profile, ""); } else { @@ -163,10 +160,7 @@ mod tests { let (version, sha) = prefix.split_once('-').unwrap_or((prefix, "")); assert_eq!(version, env!("CARGO_PKG_VERSION")); - assert_eq!( - sha, - std::env::var("NODE_GIT_SHA").unwrap_or_default().as_str() - ); + assert_eq!(sha, env::var("NODE_GIT_SHA").unwrap_or_default().as_str()); if env!("NODE_BUILD_PROFILE") == "release" { assert_eq!(profile, ""); } else { diff --git a/node/src/logging.rs b/node/src/logging.rs index b73f4300c9..40162c5f4e 100644 --- a/node/src/logging.rs +++ b/node/src/logging.rs @@ -311,12 +311,12 @@ pub fn display_global_env_filter() -> anyhow::Result { } /// Type alias for the formatting function used. -pub type FormatDebugFn = fn(&mut Writer, &Field, &dyn std::fmt::Debug) -> fmt::Result; +pub type FormatDebugFn = fn(&mut Writer, &Field, &dyn fmt::Debug) -> fmt::Result; fn format_into_debug_writer( writer: &mut Writer, field: &Field, - value: &dyn std::fmt::Debug, + value: &dyn fmt::Debug, ) -> fmt::Result { match field.name() { LOG_FIELD_MESSAGE => write!(writer, "{:?}", value), @@ -346,7 +346,7 @@ pub fn init_with_config(config: &LoggingConfig) -> anyhow::Result<()> { // Setup a new tracing-subscriber writing to `stdout` for logging. LoggingFormat::Text => { let builder = tracing_subscriber::fmt() - .with_writer(io::stdout as fn() -> std::io::Stdout) + .with_writer(io::stdout as fn() -> io::Stdout) .with_env_filter(filter) .fmt_fields(formatter) .event_format(FmtEvent::new(config.color, config.abbreviate_modules)) @@ -360,7 +360,7 @@ pub fn init_with_config(config: &LoggingConfig) -> anyhow::Result<()> { // JSON logging writes to `stdout` as well but uses the JSON format. 
LoggingFormat::Json => { let builder = tracing_subscriber::fmt() - .with_writer(io::stdout as fn() -> std::io::Stdout) + .with_writer(io::stdout as fn() -> io::Stdout) .with_env_filter(filter) .json() .with_filter_reloading(); diff --git a/node/src/reactor/main_reactor/catch_up.rs b/node/src/reactor/main_reactor/catch_up.rs index 0cae092353..f98134a566 100644 --- a/node/src/reactor/main_reactor/catch_up.rs +++ b/node/src/reactor/main_reactor/catch_up.rs @@ -117,7 +117,7 @@ impl MainReactor { match self .storage .read_highest_switch_block_headers(1) - .map(|headers| headers.get(0).cloned()) + .map(|headers| headers.first().cloned()) { Ok(Some(_)) => { // no trusted hash, no local block, no error, must be waiting for genesis diff --git a/node/src/reactor/main_reactor/event.rs b/node/src/reactor/main_reactor/event.rs index d14584ab4f..d67497c5b2 100644 --- a/node/src/reactor/main_reactor/event.rs +++ b/node/src/reactor/main_reactor/event.rs @@ -562,7 +562,7 @@ impl From for MainEvent { impl From for MainEvent { fn from(request: TrieAccumulatorRequest) -> Self { MainEvent::BlockSynchronizer(block_synchronizer::Event::GlobalStateSynchronizer( - block_synchronizer::GlobalStateSynchronizerEvent::TrieAccumulatorEvent(request.into()), + GlobalStateSynchronizerEvent::TrieAccumulator(request.into()), )) } } diff --git a/node/src/reactor/main_reactor/tests/transactions.rs b/node/src/reactor/main_reactor/tests/transactions.rs index bddad6eeef..09baf1f6ac 100644 --- a/node/src/reactor/main_reactor/tests/transactions.rs +++ b/node/src/reactor/main_reactor/tests/transactions.rs @@ -8,7 +8,7 @@ use casper_types::{ addressable_entity::NamedKeyAddr, runtime_args, system::mint::{ARG_AMOUNT, ARG_TARGET}, - AddressableEntity, Digest, EntityAddr, GasLimited, TransactionCategory, + AddressableEntity, Digest, EntityAddr, ExecutionInfo, GasLimited, TransactionCategory, }; use once_cell::sync::Lazy; @@ -3650,3 +3650,188 @@ async fn out_of_gas_txn_does_not_produce_effects() { get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, "new_key").is_none() ); } + +#[tokio::test] +async fn gas_holds_accumulate_for_multiple_transactions_in_the_same_block() { + let config = SingleTransactionTestCase::default_test_config() + .with_min_gas_price(MIN_GAS_PRICE) + .with_max_gas_price(MIN_GAS_PRICE) + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_balance_hold_interval(TimeDiff::from_seconds(5)); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + const TRANSFER_AMOUNT: u64 = 30_000_000_000; + + let chain_name = test.fixture.chainspec.network_config.name.clone(); + let txn_pricing_mode = PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + }; + let expected_transfer_gas = test.chainspec().system_costs_config.mint_costs().transfer; + let expected_transfer_cost: U512 = U512::from(expected_transfer_gas) * MIN_GAS_PRICE; + + let mut txn_1 = Transaction::from( + TransactionV1Builder::new_transfer(TRANSFER_AMOUNT, None, CHARLIE_PUBLIC_KEY.clone(), None) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(txn_pricing_mode.clone()) + .with_chain_name(chain_name.clone()) + .build() + .unwrap(), + ); + txn_1.sign(&ALICE_SECRET_KEY); + let txn_1_hash = txn_1.hash(); + + let mut txn_2 = Transaction::from( + 
TransactionV1Builder::new_transfer( + 2 * TRANSFER_AMOUNT, + None, + CHARLIE_PUBLIC_KEY.clone(), + None, + ) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(txn_pricing_mode.clone()) + .with_chain_name(chain_name.clone()) + .build() + .unwrap(), + ); + txn_2.sign(&ALICE_SECRET_KEY); + let txn_2_hash = txn_2.hash(); + + let mut txn_3 = Transaction::from( + TransactionV1Builder::new_transfer( + 3 * TRANSFER_AMOUNT, + None, + CHARLIE_PUBLIC_KEY.clone(), + None, + ) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(txn_pricing_mode) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + txn_3.sign(&ALICE_SECRET_KEY); + let txn_3_hash = txn_3.hash(); + + test.fixture.inject_transaction(txn_1).await; + test.fixture.inject_transaction(txn_2).await; + test.fixture.inject_transaction(txn_3).await; + + test.fixture + .run_until_executed_transaction(&txn_1_hash, TEN_SECS) + .await; + test.fixture + .run_until_executed_transaction(&txn_2_hash, TEN_SECS) + .await; + test.fixture + .run_until_executed_transaction(&txn_3_hash, TEN_SECS) + .await; + + let (_node_id, runner) = test.fixture.network.nodes().iter().next().unwrap(); + let ExecutionInfo { + block_height: txn_1_block_height, + execution_result: txn_1_exec_result, + .. + } = runner + .main_reactor() + .storage() + .read_execution_info(txn_1_hash) + .expect("Expected transaction to be included in a block."); + let ExecutionInfo { + block_height: txn_2_block_height, + execution_result: txn_2_exec_result, + .. + } = runner + .main_reactor() + .storage() + .read_execution_info(txn_2_hash) + .expect("Expected transaction to be included in a block."); + let ExecutionInfo { + block_height: txn_3_block_height, + execution_result: txn_3_exec_result, + .. + } = runner + .main_reactor() + .storage() + .read_execution_info(txn_3_hash) + .expect("Expected transaction to be included in a block."); + + let txn_1_exec_result = txn_1_exec_result.expect("Expected result for txn 1"); + let txn_2_exec_result = txn_2_exec_result.expect("Expected result for txn 2"); + let txn_3_exec_result = txn_3_exec_result.expect("Expected result for txn 3"); + + assert!(exec_result_is_success(&txn_1_exec_result)); + assert!(exec_result_is_success(&txn_2_exec_result)); + assert!(exec_result_is_success(&txn_3_exec_result)); + + assert_exec_result_cost( + txn_1_exec_result, + expected_transfer_cost, + expected_transfer_gas.into(), + ); + assert_exec_result_cost( + txn_2_exec_result, + expected_transfer_cost, + expected_transfer_gas.into(), + ); + assert_exec_result_cost( + txn_3_exec_result, + expected_transfer_cost, + expected_transfer_gas.into(), + ); + + let max_block_height = std::cmp::max( + std::cmp::max(txn_1_block_height, txn_2_block_height), + txn_3_block_height, + ); + let alice_total_holds: U512 = get_balance( + &mut test.fixture, + &ALICE_PUBLIC_KEY, + Some(max_block_height), + false, + ) + .proofs_result() + .expect("Expected Alice to proof results.") + .balance_holds() + .expect("Expected Alice to have holds.") + .values() + .map(|block_holds| block_holds.values().copied().sum()) + .sum(); + assert_eq!( + alice_total_holds, + expected_transfer_cost * 3, + "Total holds amount should be equal to the cost of the 3 transactions." 
+ ); + + test.fixture + .run_until_block_height(max_block_height + 5, ONE_MIN) + .await; + let alice_total_holds: U512 = get_balance(&mut test.fixture, &ALICE_PUBLIC_KEY, None, false) + .proofs_result() + .expect("Expected Alice to proof results.") + .balance_holds() + .expect("Expected Alice to have holds.") + .values() + .map(|block_holds| block_holds.values().copied().sum()) + .sum(); + assert_eq!( + alice_total_holds, + U512::from(0), + "Holds should have expired." + ); +} diff --git a/node/src/testing.rs b/node/src/testing.rs index 3d81a7f96d..dcbda8d853 100644 --- a/node/src/testing.rs +++ b/node/src/testing.rs @@ -364,7 +364,7 @@ pub(crate) enum UnitTestEvent { FatalAnnouncement(FatalAnnouncement), /// A network request made by the component under test. #[from] - NetworkRequest(NetworkRequest), + NetworkRequest(#[allow(dead_code)] NetworkRequest), } impl ReactorEvent for UnitTestEvent { diff --git a/node/src/tls.rs b/node/src/tls.rs index cad3f18468..195dd1befd 100644 --- a/node/src/tls.rs +++ b/node/src/tls.rs @@ -710,7 +710,7 @@ fn generate_private_key() -> SslResult> { // TODO: Please verify this for accuracy! let ec_group = ec::EcGroup::from_curve_name(SIGNATURE_CURVE)?; - let ec_key = ec::EcKey::generate(ec_group.as_ref())?; + let ec_key = EcKey::generate(ec_group.as_ref())?; PKey::from_ec_key(ec_key) } diff --git a/node/src/types/exit_code.rs b/node/src/types/exit_code.rs index 8235090469..f9c46281fc 100644 --- a/node/src/types/exit_code.rs +++ b/node/src/types/exit_code.rs @@ -11,6 +11,7 @@ const SIGNAL_OFFSET: u8 = 128; /// Note that a panic will result in the Rust process producing an exit code of 101. #[derive(Clone, Copy, PartialEq, Eq, Debug, DataSize)] #[repr(u8)] +#[non_exhaustive] pub enum ExitCode { /// The process should exit with success. The launcher should proceed to run the next /// installed version of `casper-node`. diff --git a/node/src/types/sync_leap.rs b/node/src/types/sync_leap.rs index ab51f11534..62178da8e3 100644 --- a/node/src/types/sync_leap.rs +++ b/node/src/types/sync_leap.rs @@ -84,7 +84,7 @@ impl SyncLeapIdentifier { } impl Display for SyncLeapIdentifier { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, "{} trusted_ancestor_only: {}", diff --git a/node/src/utils/specimen.rs b/node/src/utils/specimen.rs index e9424d7234..cfd8eee052 100644 --- a/node/src/utils/specimen.rs +++ b/node/src/utils/specimen.rs @@ -60,7 +60,7 @@ impl Cache { /// Retrieves a potentially memoized instance. pub(crate) fn get(&mut self) -> Option<&T> { self.get_all::() - .get(0) + .first() .map(|box_any| box_any.downcast_ref::().expect("cache corrupted")) } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 8142c30126..6f14058b2e 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.73.0" +channel = "1.77.2" diff --git a/smart_contracts/contract/src/contract_api/runtime.rs b/smart_contracts/contract/src/contract_api/runtime.rs index 981e1492ee..ad56d7b14a 100644 --- a/smart_contracts/contract/src/contract_api/runtime.rs +++ b/smart_contracts/contract/src/contract_api/runtime.rs @@ -222,7 +222,7 @@ pub fn try_get_named_arg(name: &str) -> Option { // Avoids allocation with 0 bytes and a call to get_named_arg Vec::new() }; - bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument) + bytesrepr::deserialize(arg_bytes).ok() } /// Returns the caller of the current context, i.e. 
the [`AccountHash`] of the account which made diff --git a/storage/src/block_store/lmdb/lmdb_ext.rs b/storage/src/block_store/lmdb/lmdb_ext.rs index f3c0f83dae..1e8bbc4f6a 100644 --- a/storage/src/block_store/lmdb/lmdb_ext.rs +++ b/storage/src/block_store/lmdb/lmdb_ext.rs @@ -13,7 +13,9 @@ use std::{any::TypeId, collections::BTreeSet}; use lmdb::{Database, RwTransaction, Transaction, WriteFlags}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::de::DeserializeOwned; +#[cfg(test)] +use serde::Serialize; use thiserror::Error; use tracing::warn; @@ -121,6 +123,7 @@ pub(super) trait WriteTransactionExt { /// Returns `true` if the value has actually been written, `false` if the key already existed. /// /// Setting `overwrite` to true will cause the value to always be written instead. + #[cfg(test)] fn put_value, V: 'static + Serialize>( &mut self, db: Database, @@ -206,6 +209,7 @@ where /// function to provide compatibility with the legacy version of the `UnbondingPurse` struct. /// See [`serialize_unbonding_purse`] for more details. // TODO: Get rid of the 'static bound. +#[cfg(test)] pub(crate) fn serialize_internal( value: &V, ) -> Result, LmdbExtError> { @@ -232,6 +236,7 @@ pub(crate) fn deserialize_internal( } impl WriteTransactionExt for RwTransaction<'_> { + #[cfg(test)] fn put_value, V: 'static + Serialize>( &mut self, db: Database, @@ -343,6 +348,7 @@ pub(super) fn deserialize_unbonding_purse( } /// Serializes into a buffer. +#[cfg(test)] #[inline(always)] pub(super) fn serialize(value: &T) -> Result, LmdbExtError> { bincode::serialize(value).map_err(|err| LmdbExtError::Other(Box::new(err))) @@ -352,6 +358,7 @@ pub(super) fn serialize(value: &T) -> Result, LmdbExtError /// To provide backward compatibility with the previous version of the `UnbondingPurse`, /// the serialized bytes are prefixed with the "magic bytes", which will be used by the /// deserialization routine to detect the version of the `UnbondingPurse` struct. 
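For readers unfamiliar with the versioning trick described in the doc comment above: the new serializer simply prepends a fixed magic-byte prefix, and the reader branches on whether that prefix is present. The following is a hedged, stand-alone sketch of that idea only; the constant name echoes `UNBONDING_PURSE_V2_MAGIC_BYTES` from this diff, the value and helpers are illustrative and not the actual casper-node code:

    // Sketch only: shows the magic-bytes versioning scheme, not the real implementation.
    const UNBONDING_PURSE_V2_MAGIC_BYTES: &[u8] = &[0xCA, 0xFE]; // placeholder value

    /// New-format write: magic bytes first, then the serialized payload.
    fn serialize_versioned(payload: &[u8]) -> Vec<u8> {
        let mut out = UNBONDING_PURSE_V2_MAGIC_BYTES.to_vec();
        out.extend_from_slice(payload);
        out
    }

    /// Read side: detect the prefix to decide between the new and the legacy layout.
    fn split_versioned(bytes: &[u8]) -> (bool, &[u8]) {
        match bytes.strip_prefix(UNBONDING_PURSE_V2_MAGIC_BYTES) {
            Some(rest) => (true, rest), // new format
            None => (false, bytes),     // legacy format, parse as-is
        }
    }
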
+#[cfg(test)] #[inline(always)] pub(super) fn serialize_unbonding_purse(value: &T) -> Result, LmdbExtError> { let mut serialized = UNBONDING_PURSE_V2_MAGIC_BYTES.to_vec(); diff --git a/storage/src/data_access_layer/balance_hold.rs b/storage/src/data_access_layer/balance_hold.rs index 7ef35b4232..f97e65d382 100644 --- a/storage/src/data_access_layer/balance_hold.rs +++ b/storage/src/data_access_layer/balance_hold.rs @@ -6,7 +6,7 @@ use casper_types::{ account::AccountHash, execution::Effects, system::mint::{BalanceHoldAddr, BalanceHoldAddrTag}, - Digest, ProtocolVersion, U512, + Digest, ProtocolVersion, StoredValue, U512, }; use std::fmt::{Display, Formatter}; use thiserror::Error; @@ -159,7 +159,8 @@ pub enum BalanceHoldError { TrackingCopy(TrackingCopyError), Balance(BalanceFailure), InsufficientBalance { remaining_balance: U512 }, - UnexpectedWildcardVariant, // programmer error + UnexpectedWildcardVariant, // programmer error, + UnexpectedHoldValue(StoredValue), } impl From for BalanceHoldError { @@ -190,6 +191,9 @@ impl Display for BalanceHoldError { ) } BalanceHoldError::Balance(be) => Display::fmt(be, f), + BalanceHoldError::UnexpectedHoldValue(value) => { + write!(f, "Found an unexpected hold value in storage: {:?}", value,) + } } } } diff --git a/storage/src/data_access_layer/genesis.rs b/storage/src/data_access_layer/genesis.rs index ec85a6506a..163270c76f 100644 --- a/storage/src/data_access_layer/genesis.rs +++ b/storage/src/data_access_layer/genesis.rs @@ -1,3 +1,4 @@ +#[cfg(any(feature = "testing", test))] use rand::{ distributions::{Distribution, Standard}, Rng, @@ -53,6 +54,7 @@ impl GenesisRequest { } } +#[cfg(any(feature = "testing", test))] impl Distribution for Standard { fn sample(&self, rng: &mut R) -> GenesisRequest { let input: [u8; 32] = rng.gen(); diff --git a/storage/src/global_state/state/mod.rs b/storage/src/global_state/state/mod.rs index 943217460c..725d4244b1 100644 --- a/storage/src/global_state/state/mod.rs +++ b/storage/src/global_state/state/mod.rs @@ -865,14 +865,6 @@ pub trait StateProvider { remaining_balance } }; - let cl_value = match CLValue::from_t(held_amount) { - Ok(cl_value) => cl_value, - Err(cve) => { - return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy( - TrackingCopyError::CLValue(cve), - )); - } - }; let balance_hold_addr = match tag { BalanceHoldAddrTag::Gas => BalanceHoldAddr::Gas { @@ -886,7 +878,39 @@ pub trait StateProvider { }; let hold_key = Key::BalanceHold(balance_hold_addr); - tc.write(hold_key, StoredValue::CLValue(cl_value)); + let hold_value = match tc.get(&hold_key) { + Ok(Some(StoredValue::CLValue(cl_value))) => { + // There was a previous hold on this balance. We need to add the new hold to + // the old one. + match cl_value.clone().into_t::() { + Ok(prev_hold) => prev_hold.saturating_add(held_amount), + Err(cve) => { + return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy( + TrackingCopyError::CLValue(cve), + )); + } + } + } + Ok(Some(other_value_variant)) => { + return BalanceHoldResult::Failure(BalanceHoldError::UnexpectedHoldValue( + other_value_variant, + )) + } + Ok(None) => held_amount, // There was no previous hold. 
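+            // Taken together, the arms of this match turn the hold write into a read-modify-write:
+            // an existing CLValue hold under `hold_key` is decoded back to a U512 and widened with
+            // saturating_add, any other stored-value variant is rejected as UnexpectedHoldValue,
+            // a missing entry falls through to the bare `held_amount`, and the error arm below
+            // surfaces tracking-copy failures before the combined amount is re-written.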
+ Err(tce) => { + return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(tce)); + } + }; + + let hold_cl_value = match CLValue::from_t(hold_value) { + Ok(cl_value) => cl_value, + Err(cve) => { + return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy( + TrackingCopyError::CLValue(cve), + )); + } + }; + tc.write(hold_key, StoredValue::CLValue(hold_cl_value)); let holds = vec![balance_hold_addr]; let available_balance = remaining_balance.saturating_sub(held_amount); diff --git a/storage/src/global_state/trie_store/operations/tests/prune.rs b/storage/src/global_state/trie_store/operations/tests/prune.rs index 2e95189dd3..1718010061 100644 --- a/storage/src/global_state/trie_store/operations/tests/prune.rs +++ b/storage/src/global_state/trie_store/operations/tests/prune.rs @@ -344,8 +344,8 @@ mod full_tries { let pairs_to_insert_less_pruned: Vec<(K, V)> = pairs_to_insert .iter() .rev() + .filter(|&(key, _value)| !keys_to_prune.contains(key)) .cloned() - .filter(|(key, _value)| !keys_to_prune.contains(key)) .collect(); let mut actual_root = *root; diff --git a/storage/src/system/handle_payment/handle_payment_native.rs b/storage/src/system/handle_payment/handle_payment_native.rs index 26c516faee..e426559b5e 100644 --- a/storage/src/system/handle_payment/handle_payment_native.rs +++ b/storage/src/system/handle_payment/handle_payment_native.rs @@ -20,8 +20,6 @@ use casper_types::{ use std::collections::BTreeSet; use tracing::error; -pub use casper_types::system::handle_payment::Error as HandlePaymentError; - impl MintProvider for RuntimeNative where S: StateReader, diff --git a/storage/src/tracking_copy/mod.rs b/storage/src/tracking_copy/mod.rs index 88043a5017..971578c7a2 100644 --- a/storage/src/tracking_copy/mod.rs +++ b/storage/src/tracking_copy/mod.rs @@ -226,7 +226,7 @@ impl> TrackingCopyCache { /// Gets value from `key` in the cache. pub fn get(&mut self, key: &Key) -> Option<&StoredValue> { - if self.prunes_cached.get(key).is_some() { + if self.prunes_cached.contains(key) { // the item is marked for pruning and therefore // is no longer accessible. return None; diff --git a/types/src/account/error.rs b/types/src/account/error.rs index 35195fc740..a3862a2357 100644 --- a/types/src/account/error.rs +++ b/types/src/account/error.rs @@ -38,6 +38,3 @@ impl Display for FromStrError { } } } -/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`](super::AccountHash). -#[derive(Debug)] -pub struct TryFromSliceForAccountHashError(()); diff --git a/types/src/block/rewarded_signatures.rs b/types/src/block/rewarded_signatures.rs index 7d0f2e883c..e483f95a38 100644 --- a/types/src/block/rewarded_signatures.rs +++ b/types/src/block/rewarded_signatures.rs @@ -322,6 +322,18 @@ fn chunks_8(bits: impl Iterator) -> impl Iterator Self { + let mut bytes = vec![0; (n_validators + 7) / 8]; + + rand::RngCore::fill_bytes(rng, bytes.as_mut()); + + SingleBlockRewardedSignatures(bytes) + } +} + #[cfg(test)] mod tests { use super::{chunks_8, SingleBlockRewardedSignatures}; @@ -460,15 +472,3 @@ mod tests { assert_eq!(v(chunks.next()), Some(vec![26])); } } - -#[cfg(any(feature = "testing", test))] -impl SingleBlockRewardedSignatures { - /// Returns random data. 
- pub fn random(rng: &mut crate::testing::TestRng, n_validators: usize) -> Self { - let mut bytes = vec![0; (n_validators + 7) / 8]; - - rand::RngCore::fill_bytes(rng, bytes.as_mut()); - - SingleBlockRewardedSignatures(bytes) - } -} diff --git a/types/src/bytesrepr.rs b/types/src/bytesrepr.rs index 95f1dc6fa5..a5a749ea69 100644 --- a/types/src/bytesrepr.rs +++ b/types/src/bytesrepr.rs @@ -101,10 +101,10 @@ pub fn unchecked_allocate_buffer(to_be_serialized: &T) -> Vec { } /// Returns a `Vec` initialized with sufficient capacity to hold `to_be_serialized` after -/// serialization, or an error if the capacity would exceed `u32::max_value()`. +/// serialization, or an error if the capacity would exceed `u32::MAX`. pub fn allocate_buffer(to_be_serialized: &T) -> Result, Error> { let serialized_length = to_be_serialized.serialized_length(); - if serialized_length > u32::max_value() as usize { + if serialized_length > u32::MAX as usize { return Err(Error::OutOfMemory); } Ok(Vec::with_capacity(serialized_length)) @@ -1684,7 +1684,7 @@ mod proptests { bytesrepr::test_serialization_roundtrip(&t); } #[test] - fn test_ratio_u64(t in (any::(), 1..u64::max_value())) { + fn test_ratio_u64(t in (any::(), 1..u64::MAX)) { bytesrepr::test_serialization_roundtrip(&t); } } diff --git a/types/src/chainspec.rs b/types/src/chainspec.rs index a695b45336..74920d5474 100644 --- a/types/src/chainspec.rs +++ b/types/src/chainspec.rs @@ -44,15 +44,14 @@ pub use activation_point::ActivationPoint; pub use chainspec_raw_bytes::ChainspecRawBytes; #[cfg(any(feature = "testing", test))] pub use core_config::DEFAULT_FEE_HANDLING; +#[cfg(any(feature = "testing", test))] +pub use core_config::DEFAULT_GAS_HOLD_BALANCE_HANDLING; #[cfg(any(feature = "std", test))] pub use core_config::DEFAULT_REFUND_HANDLING; pub use core_config::{ - ConsensusProtocolName, CoreConfig, LegacyRequiredFinality, DEFAULT_GAS_HOLD_BALANCE_HANDLING, - DEFAULT_GAS_HOLD_INTERVAL, + ConsensusProtocolName, CoreConfig, LegacyRequiredFinality, DEFAULT_GAS_HOLD_INTERVAL, }; pub use fee_handling::FeeHandling; -#[cfg(any(feature = "testing", test))] -pub use genesis_config::DEFAULT_AUCTION_DELAY; #[cfg(any(feature = "std", test))] pub use genesis_config::{GenesisConfig, GenesisConfigBuilder}; pub use global_state_update::{GlobalStateUpdate, GlobalStateUpdateConfig, GlobalStateUpdateError}; diff --git a/types/src/chainspec/accounts_config/genesis.rs b/types/src/chainspec/accounts_config/genesis.rs index 8691578f93..a2941e8caa 100644 --- a/types/src/chainspec/accounts_config/genesis.rs +++ b/types/src/chainspec/accounts_config/genesis.rs @@ -306,11 +306,9 @@ impl GenesisAccount { | GenesisAccount::Delegator { .. } => { // This value represents a delegation rate in invalid state that system is supposed // to reject if used. - DelegationRate::max_value() - } - GenesisAccount::Administrator(AdministratorAccount { .. }) => { - DelegationRate::max_value() + DelegationRate::MAX } + GenesisAccount::Administrator(AdministratorAccount { .. 
}) => DelegationRate::MAX, } } diff --git a/types/src/chainspec/transaction_config.rs b/types/src/chainspec/transaction_config.rs index 8fc30b7f4b..eef2f32f6f 100644 --- a/types/src/chainspec/transaction_config.rs +++ b/types/src/chainspec/transaction_config.rs @@ -17,8 +17,10 @@ use crate::{ pub use deploy_config::DeployConfig; #[cfg(any(feature = "testing", test))] pub use deploy_config::DEFAULT_MAX_PAYMENT_MOTES; +pub use transaction_v1_config::TransactionV1Config; +#[cfg(any(feature = "testing", test))] pub use transaction_v1_config::{ - TransactionV1Config, DEFAULT_INSTALL_UPGRADE_GAS_LIMIT, DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, + DEFAULT_INSTALL_UPGRADE_GAS_LIMIT, DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, }; /// The default minimum number of motes that can be transferred. diff --git a/types/src/chainspec/transaction_config/transaction_v1_config.rs b/types/src/chainspec/transaction_config/transaction_v1_config.rs index b354ef1ffa..00ee80aa72 100644 --- a/types/src/chainspec/transaction_config/transaction_v1_config.rs +++ b/types/src/chainspec/transaction_config/transaction_v1_config.rs @@ -6,10 +6,11 @@ use serde::{Deserialize, Serialize}; #[cfg(any(feature = "testing", test))] use crate::testing::TestRng; +#[cfg(any(feature = "testing", test))] +use crate::INSTALL_UPGRADE_LANE_ID; use crate::{ bytesrepr::{self, FromBytes, ToBytes}, transaction::TransactionCategory, - INSTALL_UPGRADE_LANE_ID, }; /// Default gas limit of install / upgrade contracts diff --git a/types/src/chainspec/vm_config.rs b/types/src/chainspec/vm_config.rs index 0327071bfa..0d1bde8821 100644 --- a/types/src/chainspec/vm_config.rs +++ b/types/src/chainspec/vm_config.rs @@ -10,15 +10,20 @@ mod storage_costs; mod system_config; mod wasm_config; -pub use auction_costs::{AuctionCosts, DEFAULT_ADD_BID_COST, DEFAULT_DELEGATE_COST}; +pub use auction_costs::AuctionCosts; +#[cfg(any(feature = "testing", test))] +pub use auction_costs::{DEFAULT_ADD_BID_COST, DEFAULT_DELEGATE_COST}; pub use chainspec_registry::ChainspecRegistry; pub use handle_payment_costs::HandlePaymentCosts; +#[cfg(any(feature = "testing", test))] +pub use host_function_costs::DEFAULT_NEW_DICTIONARY_COST; pub use host_function_costs::{ - Cost as HostFunctionCost, HostFunction, HostFunctionCosts, - DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, DEFAULT_NEW_DICTIONARY_COST, + Cost as HostFunctionCost, HostFunction, HostFunctionCosts, DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, }; pub use message_limits::MessageLimits; -pub use mint_costs::{MintCosts, DEFAULT_TRANSFER_COST}; +pub use mint_costs::MintCosts; +#[cfg(any(feature = "testing", test))] +pub use mint_costs::DEFAULT_TRANSFER_COST; pub use opcode_costs::{BrTableCost, ControlFlowCosts, OpcodeCosts}; #[cfg(any(feature = "testing", test))] pub use opcode_costs::{ @@ -37,4 +42,6 @@ pub use opcode_costs::{ pub use standard_payment_costs::StandardPaymentCosts; pub use storage_costs::StorageCosts; pub use system_config::SystemConfig; -pub use wasm_config::{WasmConfig, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}; +pub use wasm_config::WasmConfig; +#[cfg(any(feature = "testing", test))] +pub use wasm_config::{DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}; diff --git a/types/src/crypto/error.rs b/types/src/crypto/error.rs index 5fdb50c27c..b1b7f56fdb 100644 --- a/types/src/crypto/error.rs +++ b/types/src/crypto/error.rs @@ -152,8 +152,10 @@ mod serde_helpers { #[derive(Serialize)] #[serde(remote = "base64::DecodeError")] pub(super) enum Base64DecodeError { + #[allow(dead_code)] InvalidByte(usize, u8), InvalidLength, + 
#[allow(dead_code)] InvalidLastSymbol(usize, u8), } } diff --git a/types/src/era_id.rs b/types/src/era_id.rs index 0ba9c30078..61c855fa8b 100644 --- a/types/src/era_id.rs +++ b/types/src/era_id.rs @@ -33,7 +33,7 @@ pub struct EraId(u64); impl EraId { /// Maximum possible value an [`EraId`] can hold. - pub const MAX: EraId = EraId(u64::max_value()); + pub const MAX: EraId = EraId(u64::MAX); /// Creates new [`EraId`] instance. pub const fn new(value: u64) -> EraId { @@ -232,7 +232,7 @@ mod tests { let window: Vec = current_era.iter_inclusive(auction_delay).collect(); assert_eq!(window.len(), auction_delay as usize + 1); - assert_eq!(window.get(0), Some(&current_era)); + assert_eq!(window.first(), Some(&current_era)); assert_eq!( window.iter().next_back(), Some(&(current_era + auction_delay)) diff --git a/types/src/file_utils.rs b/types/src/file_utils.rs index 775a7315fa..2b220aaaa0 100644 --- a/types/src/file_utils.rs +++ b/types/src/file_utils.rs @@ -67,6 +67,7 @@ pub(crate) fn write_private_file, B: AsRef<[u8]>>( fs::OpenOptions::new() .write(true) .create(true) + .truncate(true) .mode(0o600) .open(path) .and_then(|mut file| file.write_all(data.as_ref())) diff --git a/types/src/testing.rs b/types/src/testing.rs index 24b7efd380..a81cd8677f 100644 --- a/types/src/testing.rs +++ b/types/src/testing.rs @@ -14,7 +14,7 @@ use rand::{ use rand_pcg::Pcg64Mcg; thread_local! { - static THIS_THREAD_HAS_RNG: RefCell = RefCell::new(false); + static THIS_THREAD_HAS_RNG: RefCell = const { RefCell::new(false) }; } const CL_TEST_SEED: &str = "CL_TEST_SEED"; diff --git a/types/src/transaction/deploy.rs b/types/src/transaction/deploy.rs index 3374efbe10..74a99c821f 100644 --- a/types/src/transaction/deploy.rs +++ b/types/src/transaction/deploy.rs @@ -424,6 +424,15 @@ impl Deploy { }); } + let min_gas_price = chainspec.vacancy_config.min_gas_price; + let gas_price_tolerance = self.gas_price_tolerance()?; + if gas_price_tolerance < min_gas_price { + return Err(InvalidDeploy::GasPriceToleranceTooLow { + min_gas_price_tolerance: min_gas_price, + provided_gas_price_tolerance: gas_price_tolerance, + }); + } + header.is_valid(config, timestamp_leeway, at, &self.hash)?; let max_associated_keys = chainspec.core_config.max_associated_keys; @@ -1008,6 +1017,25 @@ impl Deploy { ) } + /// Returns a random valid `Deploy` with specified gas price. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_gas_price(rng: &mut TestRng, gas_price: u64) -> Self { + let deploy = Self::random(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new( + deploy.header.timestamp(), + deploy.header.ttl(), + gas_price, + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + deploy.payment, + deploy.session, + &secret_key, + None, + ) + } + + /// Creates an add bid deploy, for testing.
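As an aside, the tolerance check added to `Deploy::is_config_compliant` in the hunk above reduces to a single comparison against the chainspec's `vacancy_config.min_gas_price`. Below is a hedged, self-contained sketch of that shape only; the struct and helper are illustrative stand-ins, not the real casper-node types:

    // Illustrative stand-ins; only the comparison mirrors the diff above.
    struct VacancyConfig {
        min_gas_price: u8,
    }

    #[derive(Debug, PartialEq)]
    enum RejectReason {
        GasPriceToleranceTooLow {
            min_gas_price_tolerance: u8,
            provided_gas_price_tolerance: u8,
        },
    }

    fn check_gas_price_tolerance(provided: u8, vacancy: &VacancyConfig) -> Result<(), RejectReason> {
        if provided < vacancy.min_gas_price {
            // Mirrors the shape of InvalidDeploy::GasPriceToleranceTooLow and
            // InvalidTransactionV1::GasPriceToleranceTooLow introduced by this change.
            return Err(RejectReason::GasPriceToleranceTooLow {
                min_gas_price_tolerance: vacancy.min_gas_price,
                provided_gas_price_tolerance: provided,
            });
        }
        Ok(())
    }

    // e.g. check_gas_price_tolerance(0, &VacancyConfig { min_gas_price: 1 }) is rejected,
    // which is the situation exercised by the new
    // `not_acceptable_due_to_too_low_gas_price_tolerance` test further down in this diff.
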
#[cfg(any(all(feature = "std", feature = "testing"), test))] pub fn add_bid( @@ -1529,6 +1557,7 @@ mod tests { ttl: TimeDiff, dependency_count: usize, chain_name: &str, + gas_price: u64, ) -> Deploy { let secret_key = SecretKey::random(rng); let dependencies = iter::repeat_with(|| DeployHash::random(rng)) @@ -1544,7 +1573,7 @@ mod tests { Deploy::new( Timestamp::now(), ttl, - 1, + gas_price, dependencies, chain_name.to_string(), ExecutableDeployItem::ModuleBytes { @@ -1561,9 +1590,17 @@ mod tests { #[test] fn is_valid() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); - let deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + let deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); assert_eq!( deploy.is_valid.get(), None, @@ -1615,8 +1652,16 @@ mod tests { #[test] fn not_valid_due_to_invalid_body_hash() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); - let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); deploy.session = ExecutableDeployItem::Transfer { args: runtime_args! { @@ -1628,8 +1673,16 @@ mod tests { #[test] fn not_valid_due_to_invalid_deploy_hash() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); - let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); // deploy.header.gas_price = 2; deploy.invalidate(); @@ -1638,8 +1691,16 @@ mod tests { #[test] fn not_valid_due_to_empty_approvals() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); - let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); deploy.approvals = BTreeSet::new(); assert!(deploy.approvals.is_empty()); check_is_not_valid(deploy, InvalidDeploy::EmptyApprovals) @@ -1647,8 +1708,16 @@ mod tests { #[test] fn not_valid_due_to_invalid_approval() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); - let mut deploy = create_deploy(&mut rng, TransactionConfig::default().max_ttl, 0, "net-1"); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); let deploy2 = Deploy::random(&mut rng); @@ -1673,6 +1742,8 @@ mod tests { #[test] fn is_acceptable() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1".to_string(); let mut chainspec = Chainspec::default(); @@ -1684,6 +1755,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), &chain_name, + GAS_PRICE_TOLERANCE as u64, ); let current_timestamp = deploy.header().timestamp(); deploy @@ -1693,6 +1765,8 @@ mod tests { #[test] fn not_acceptable_due_to_invalid_chain_name() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let expected_chain_name = "net-1"; let wrong_chain_name = "net-2".to_string(); @@ -1706,6 +1780,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), &wrong_chain_name, + GAS_PRICE_TOLERANCE as u64, ); let 
expected_error = InvalidDeploy::InvalidChainName { @@ -1726,6 +1801,8 @@ mod tests { #[test] fn not_acceptable_due_to_excessive_dependencies() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -1735,7 +1812,13 @@ mod tests { let dependency_count = usize::from(config.deploy_config.max_dependencies + 1); - let deploy = create_deploy(&mut rng, config.max_ttl, dependency_count, chain_name); + let deploy = create_deploy( + &mut rng, + config.max_ttl, + dependency_count, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); let expected_error = InvalidDeploy::DependenciesNoLongerSupported; @@ -1752,6 +1835,8 @@ mod tests { #[test] fn not_acceptable_due_to_excessive_ttl() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -1766,6 +1851,7 @@ mod tests { ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); let expected_error = InvalidDeploy::ExcessiveTimeToLive { @@ -1786,6 +1872,8 @@ mod tests { #[test] fn not_acceptable_due_to_timestamp_in_future() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -1799,6 +1887,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); let current_timestamp = deploy.header.timestamp() - leeway - TimeDiff::from_seconds(1); @@ -1820,6 +1909,8 @@ mod tests { #[test] fn acceptable_if_timestamp_slightly_in_future() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -1833,6 +1924,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); let current_timestamp = deploy.header.timestamp() - (leeway / 2); deploy @@ -1842,6 +1934,8 @@ mod tests { #[test] fn not_acceptable_due_to_missing_payment_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -1868,6 +1962,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); deploy.payment = payment; @@ -1886,6 +1981,8 @@ mod tests { #[test] fn not_acceptable_due_to_mangled_payment_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -1914,6 +2011,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); deploy.payment = payment; @@ -1932,6 +2030,8 @@ mod tests { #[test] fn not_acceptable_due_to_excessive_payment_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -1961,6 +2061,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); deploy.payment = payment; @@ -1984,6 +2085,8 @@ mod tests { #[test] fn transfer_acceptable_regardless_of_excessive_payment_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let secret_key = SecretKey::random(&mut rng); let chain_name = "net-1"; @@ -2011,7 +2114,7 @@ mod tests { let deploy = Deploy::new( Timestamp::now(), config.max_ttl, - 1, + GAS_PRICE_TOLERANCE as u64, vec![], chain_name.to_string(), payment, @@ -2031,6 +2134,8 @@ mod tests { #[test] fn not_acceptable_due_to_excessive_approvals() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; @@ 
-2042,6 +2147,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies as usize, chain_name, + GAS_PRICE_TOLERANCE as u64, ); // This test is to ensure a given limit is being checked. // Therefore, set the limit to one less than the approvals in the deploy. @@ -2059,6 +2165,8 @@ mod tests { #[test] fn not_acceptable_due_to_missing_transfer_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; let mut chainspec = Chainspec::default(); @@ -2070,6 +2178,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies as usize, chain_name, + GAS_PRICE_TOLERANCE as u64, ); let transfer_args = RuntimeArgs::default(); @@ -2087,6 +2196,8 @@ mod tests { #[test] fn not_acceptable_due_to_mangled_transfer_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; let mut chainspec = Chainspec::default(); @@ -2098,6 +2209,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies as usize, chain_name, + GAS_PRICE_TOLERANCE as u64, ); let transfer_args = runtime_args! { @@ -2117,8 +2229,40 @@ mod tests { ) } + #[test] + fn not_acceptable_due_to_too_low_gas_price_tolerance() { + const GAS_PRICE_TOLERANCE: u8 = 0; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + + let config = chainspec.transaction_config.clone(); + let deploy = create_deploy( + &mut rng, + config.max_ttl, + config.deploy_config.max_dependencies as usize, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + let current_timestamp = deploy.header().timestamp(); + assert!(matches!( + deploy.is_config_compliant( + &chainspec, + TimeDiff::default(), + current_timestamp + ), + Err(InvalidDeploy::GasPriceToleranceTooLow { min_gas_price_tolerance, provided_gas_price_tolerance }) + if min_gas_price_tolerance == chainspec.vacancy_config.min_gas_price && provided_gas_price_tolerance == GAS_PRICE_TOLERANCE + )) + } + #[test] fn not_acceptable_due_to_insufficient_transfer_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let mut rng = TestRng::new(); let chain_name = "net-1"; let mut chainspec = Chainspec::default(); @@ -2130,6 +2274,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies as usize, chain_name, + GAS_PRICE_TOLERANCE as u64, ); let amount = config.native_transfer_minimum_motes - 1; @@ -2151,12 +2296,14 @@ mod tests { minimum: Box::new(U512::from(config.native_transfer_minimum_motes)), attempted: Box::new(insufficient_amount), }), - deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp) + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp,) ) } #[test] fn should_use_payment_amount_for_classic_payment() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let payment_amount = 500u64; let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -2187,6 +2334,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); deploy.payment = payment; deploy.session = session; @@ -2215,6 +2363,8 @@ mod tests { #[test] fn should_use_cost_table_for_fixed_payment() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let payment_amount = 500u64; let mut rng = TestRng::new(); let chain_name = "net-1"; @@ -2245,6 +2395,7 @@ mod tests { config.max_ttl, config.deploy_config.max_dependencies.into(), chain_name, + GAS_PRICE_TOLERANCE as u64, ); deploy.payment = payment; deploy.session = 
session; diff --git a/types/src/transaction/deploy/error.rs b/types/src/transaction/deploy/error.rs index f846fee0b7..e0093aa15d 100644 --- a/types/src/transaction/deploy/error.rs +++ b/types/src/transaction/deploy/error.rs @@ -124,6 +124,14 @@ pub enum InvalidDeploy { /// Unable to calculate gas cost. UnableToCalculateGasCost, + + /// Gas price tolerance too low. + GasPriceToleranceTooLow { + /// The minimum gas price tolerance. + min_gas_price_tolerance: u8, + /// The provided gas price tolerance. + provided_gas_price_tolerance: u8, + }, } impl Display for InvalidDeploy { @@ -241,6 +249,11 @@ impl Display for InvalidDeploy { InvalidDeploy::UnableToCalculateGasCost => { write!(formatter, "unable to calculate gas cost",) } + InvalidDeploy::GasPriceToleranceTooLow { min_gas_price_tolerance, provided_gas_price_tolerance } => write!( + formatter, + "received a deploy with gas price tolerance {} but this chain will only go as low as {}", + provided_gas_price_tolerance, min_gas_price_tolerance + ), } } } @@ -274,7 +287,8 @@ impl StdError for InvalidDeploy { | InvalidDeploy::InsufficientTransferAmount { .. } | InvalidDeploy::ExcessiveApprovals { .. } | InvalidDeploy::UnableToCalculateGasLimit - | InvalidDeploy::UnableToCalculateGasCost => None, + | InvalidDeploy::UnableToCalculateGasCost + | InvalidDeploy::GasPriceToleranceTooLow { .. } => None, } } } diff --git a/types/src/transaction/transaction_v1.rs b/types/src/transaction/transaction_v1.rs index 40026233fb..7392069118 100644 --- a/types/src/transaction/transaction_v1.rs +++ b/types/src/transaction/transaction_v1.rs @@ -40,8 +40,10 @@ use crate::{ crypto, Digest, DisplayIter, RuntimeArgs, SecretKey, TimeDiff, Timestamp, TransactionRuntime, }; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::TransactionConfig; #[cfg(any(feature = "std", test))] -use crate::{Gas, Motes, TransactionConfig, U512}; +use crate::{Gas, Motes, U512}; pub use errors_v1::{ DecodeFromJsonErrorV1 as TransactionV1DecodeFromJsonError, ErrorV1 as TransactionV1Error, ExcessiveSizeErrorV1 as TransactionV1ExcessiveSizeError, @@ -276,7 +278,7 @@ impl TransactionV1 { /// Returns `true` if the serialized size of the transaction is not greater than /// `max_transaction_size`. - #[cfg(any(feature = "std", test))] + #[cfg(any(all(feature = "std", feature = "testing"), test))] fn is_valid_size( &self, max_transaction_size: u32, @@ -413,6 +415,15 @@ impl TransactionV1 { } } + let min_gas_price = chainspec.vacancy_config.min_gas_price; + let gas_price_tolerance = self.header.gas_price_tolerance(); + if gas_price_tolerance < min_gas_price { + return Err(InvalidTransactionV1::GasPriceToleranceTooLow { + min_gas_price_tolerance: min_gas_price, + provided_gas_price_tolerance: gas_price_tolerance, + }); + } + header.is_valid(&transaction_config, timestamp_leeway, at, &self.hash)?; let max_associated_keys = chainspec.core_config.max_associated_keys; diff --git a/types/src/transaction/transaction_v1/errors_v1.rs b/types/src/transaction/transaction_v1/errors_v1.rs index c6ad435c31..bc0ef906e2 100644 --- a/types/src/transaction/transaction_v1/errors_v1.rs +++ b/types/src/transaction/transaction_v1/errors_v1.rs @@ -156,6 +156,13 @@ pub enum InvalidTransaction { }, /// The transaction provided is not supported. InvalidTransactionKind(u8), + /// Gas price tolerance too low. + GasPriceToleranceTooLow { + /// The minimum gas price tolerance. + min_gas_price_tolerance: u8, + /// The provided gas price tolerance. 
+ provided_gas_price_tolerance: u8, + }, } impl Display for InvalidTransaction { @@ -294,6 +301,16 @@ impl Display for InvalidTransaction { "received a transaction with an invalid kind {kind}" ) } + InvalidTransaction::GasPriceToleranceTooLow { + min_gas_price_tolerance, + provided_gas_price_tolerance, + } => { + write!( + formatter, + "received a transaction with gas price tolerance {} but this chain will only go as low as {}", + provided_gas_price_tolerance, min_gas_price_tolerance + ) + } } } } @@ -331,6 +348,7 @@ impl StdError for InvalidTransaction { | InvalidTransaction::UnableToCalculateGasLimit | InvalidTransaction::UnableToCalculateGasCost | InvalidTransaction::InvalidPricingMode { .. } + | InvalidTransaction::GasPriceToleranceTooLow { .. } | InvalidTransaction::InvalidTransactionKind(_) => None, } } diff --git a/types/src/transaction/transaction_v1/transaction_v1_body.rs b/types/src/transaction/transaction_v1/transaction_v1_body.rs index 8c589ad857..3eb28ef1b0 100644 --- a/types/src/transaction/transaction_v1/transaction_v1_body.rs +++ b/types/src/transaction/transaction_v1/transaction_v1_body.rs @@ -12,22 +12,22 @@ use rand::{Rng, RngCore}; use schemars::JsonSchema; #[cfg(any(feature = "std", test))] use serde::{Deserialize, Serialize}; -#[cfg(any(feature = "std", test))] +#[cfg(any(all(feature = "std", feature = "testing"), test))] use tracing::debug; use super::super::{RuntimeArgs, TransactionEntryPoint, TransactionScheduling, TransactionTarget}; use super::TransactionCategory; -#[cfg(any(feature = "std", test))] +#[cfg(any(all(feature = "std", feature = "testing"), test))] use super::TransactionConfig; #[cfg(doc)] use super::TransactionV1; use crate::bytesrepr::{self, FromBytes, ToBytes}; -#[cfg(any(feature = "std", test))] +#[cfg(any(all(feature = "std", feature = "testing"), test))] use crate::InvalidTransactionV1; -#[cfg(any(feature = "std", test))] +#[cfg(any(all(feature = "std", feature = "testing"), test))] use crate::TransactionV1ExcessiveSizeError; #[cfg(any(all(feature = "std", feature = "testing"), test))] use crate::{ @@ -131,7 +131,7 @@ impl TransactionV1Body { (self.args, self.target, self.entry_point, self.scheduling) } - #[cfg(any(feature = "std", test))] + #[cfg(any(all(feature = "std", feature = "testing"), test))] pub(super) fn is_valid(&self, config: &TransactionConfig) -> Result<(), InvalidTransactionV1> { let kind = self.transaction_category; if !config.transaction_v1_config.is_supported(kind) { diff --git a/types/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs b/types/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs index 51b071c970..a136ce3cae 100644 --- a/types/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs +++ b/types/src/transaction/transaction_v1/transaction_v1_body/arg_handling.rs @@ -2,12 +2,12 @@ use core::marker::PhantomData; use tracing::debug; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::{account::AccountHash, system::auction::ARG_VALIDATOR, CLType}; use crate::{ - account::AccountHash, bytesrepr::{FromBytes, ToBytes}, - system::auction::ARG_VALIDATOR, - CLType, CLTyped, CLValue, CLValueError, InvalidTransactionV1, PublicKey, RuntimeArgs, - TransferTarget, URef, U512, + CLTyped, CLValue, CLValueError, InvalidTransactionV1, PublicKey, RuntimeArgs, TransferTarget, + URef, U512, }; const TRANSFER_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); @@ -39,6 +39,7 @@ const REDELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("valid const 
REDELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); const REDELEGATE_ARG_NEW_VALIDATOR: RequiredArg = RequiredArg::new("new_validator"); +#[cfg(any(all(feature = "std", feature = "testing"), test))] const ACTIVATE_BID_ARG_VALIDATOR: RequiredArg = RequiredArg::new(ARG_VALIDATOR); const CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); @@ -92,6 +93,7 @@ impl OptionalArg { } } + #[cfg(any(all(feature = "std", feature = "testing"), test))] fn get(&self, args: &RuntimeArgs) -> Result, InvalidTransactionV1> where T: CLTyped + FromBytes, @@ -162,6 +164,7 @@ pub(in crate::transaction::transaction_v1) fn new_transfer_args< } /// Checks the given `RuntimeArgs` are suitable for use in a transfer transaction. +#[cfg(any(all(feature = "std", feature = "testing"), test))] pub(in crate::transaction::transaction_v1) fn has_valid_transfer_args( args: &RuntimeArgs, native_transfer_minimum_motes: u64, @@ -235,6 +238,7 @@ pub(in crate::transaction::transaction_v1) fn new_add_bid_args>( } /// Checks the given `RuntimeArgs` are suitable for use in an add_bid transaction. +#[cfg(any(all(feature = "std", feature = "testing"), test))] pub(in crate::transaction::transaction_v1) fn has_valid_add_bid_args( args: &RuntimeArgs, ) -> Result<(), InvalidTransactionV1> { @@ -256,6 +260,7 @@ pub(in crate::transaction::transaction_v1) fn new_withdraw_bid_args Result<(), InvalidTransactionV1> { @@ -278,6 +283,7 @@ pub(in crate::transaction::transaction_v1) fn new_delegate_args>( } /// Checks the given `RuntimeArgs` are suitable for use in a delegate transaction. +#[cfg(any(all(feature = "std", feature = "testing"), test))] pub(in crate::transaction::transaction_v1) fn has_valid_delegate_args( args: &RuntimeArgs, ) -> Result<(), InvalidTransactionV1> { @@ -301,6 +307,7 @@ pub(in crate::transaction::transaction_v1) fn new_undelegate_args> } /// Checks the given `RuntimeArgs` are suitable for use in an undelegate transaction. +#[cfg(any(all(feature = "std", feature = "testing"), test))] pub(in crate::transaction::transaction_v1) fn has_valid_undelegate_args( args: &RuntimeArgs, ) -> Result<(), InvalidTransactionV1> { @@ -326,6 +333,7 @@ pub(in crate::transaction::transaction_v1) fn new_redelegate_args> } /// Checks the given `RuntimeArgs` are suitable for use in a redelegate transaction. +#[cfg(any(all(feature = "std", feature = "testing"), test))] pub(in crate::transaction::transaction_v1) fn has_valid_redelegate_args( args: &RuntimeArgs, ) -> Result<(), InvalidTransactionV1> { @@ -337,6 +345,7 @@ pub(in crate::transaction::transaction_v1) fn has_valid_redelegate_args( } /// Checks the given `RuntimeArgs` are suitable for use in an activate bid transaction. +#[cfg(any(all(feature = "std", feature = "testing"), test))] pub(in crate::transaction::transaction_v1) fn has_valid_activate_bid_args( args: &RuntimeArgs, ) -> Result<(), InvalidTransactionV1> { @@ -345,7 +354,8 @@ pub(in crate::transaction::transaction_v1) fn has_valid_activate_bid_args( } /// Checks the given `RuntimeArgs` are suitable for use in a change bid public key transaction. -pub(in crate::transaction::transaction_v1) fn has_valid_change_bid_public_key_args( +#[allow(dead_code)] +pub(super) fn has_valid_change_bid_public_key_args( args: &RuntimeArgs, ) -> Result<(), InvalidTransactionV1> { let _public_key = CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.get(args)?;