diff --git a/Cargo.lock b/Cargo.lock index 9df3bf4900..a3b5a3b68c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -621,6 +621,8 @@ dependencies = [ "serde-map-to-array", "serde_json", "serde_test", + "strum 0.26.2", + "strum_macros 0.26.4", "thiserror", "tokio-util 0.6.10", "tracing", @@ -730,7 +732,7 @@ dependencies = [ "serde", "serde_bytes", "serde_json", - "strum", + "strum 0.24.1", "tempfile", "thiserror", "tracing", @@ -909,7 +911,7 @@ dependencies = [ "static_assertions", "stats_alloc", "structopt", - "strum", + "strum 0.24.1", "sys-info", "tempfile", "thiserror", @@ -1041,7 +1043,7 @@ dependencies = [ "serde_bytes", "serde_json", "serde_test", - "strum", + "strum 0.24.1", "tempfile", "thiserror", "tracing", @@ -2386,6 +2388,14 @@ dependencies = [ "casper-types", ] +[[package]] +name = "endless-loop-with-effects" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "enum-iterator" version = "0.6.0" @@ -2891,6 +2901,14 @@ dependencies = [ "casper-types", ] +[[package]] +name = "gh-4771-regression" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "gimli" version = "0.26.2" @@ -3588,9 +3606,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -3666,6 +3684,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hello-world" version = "0.1.0" @@ -6232,9 +6256,15 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ - "strum_macros", + "strum_macros 0.24.3", ] +[[package]] +name = "strum" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" + [[package]] name = "strum_macros" version = "0.24.3" @@ -6248,6 +6278,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2 1.0.78", + "quote 1.0.35", + "rustversion", + "syn 2.0.52", +] + [[package]] name = "subtle" version = "2.4.1" diff --git a/Makefile b/Makefile index ee9f025d73..f9d376d31c 100644 --- a/Makefile +++ b/Makefile @@ -145,7 +145,7 @@ lint-smart-contracts: .PHONY: audit-rs audit-rs: - $(CARGO) audit --ignore RUSTSEC-2024-0332 + $(CARGO) audit --ignore RUSTSEC-2024-0332 --ignore RUSTSEC-2024-0344 --ignore RUSTSEC-2024-0348 --ignore RUSTSEC-2024-0349 --ignore RUSTSEC-2024-0351 --ignore RUSTSEC-2024-0350 --ignore RUSTSEC-2024-0352 --ignore RUSTSEC-2024-0353 .PHONY: audit-as audit-as: diff --git a/binary_port/Cargo.toml b/binary_port/Cargo.toml index f7f51a2a40..5cdc05882c 100644 --- a/binary_port/Cargo.toml +++ b/binary_port/Cargo.toml @@ -27,6 +27,8 @@ tracing = "0.1.18" 
casper-types = { path = "../types", features = ["datasize", "json-schema", "std", "testing"] } serde_json = "1" serde_test = "1" +strum = "0.26.2" +strum_macros = "0.26.4" [package.metadata.docs.rs] all-features = true diff --git a/binary_port/src/error_code.rs b/binary_port/src/error_code.rs index 4566166c0a..5f617a46bc 100644 --- a/binary_port/src/error_code.rs +++ b/binary_port/src/error_code.rs @@ -2,9 +2,13 @@ use core::{convert::TryFrom, fmt}; use casper_types::{InvalidDeploy, InvalidTransaction, InvalidTransactionV1}; +#[cfg(test)] +use strum_macros::EnumIter; + /// The error code indicating the result of handling the binary request. -#[derive(Debug, Clone, thiserror::Error)] +#[derive(Debug, Copy, Clone, thiserror::Error, Eq, PartialEq)] #[repr(u16)] +#[cfg_attr(test, derive(EnumIter))] pub enum ErrorCode { /// Request executed correctly. #[error("request executed correctly")] @@ -198,6 +202,75 @@ pub enum ErrorCode { /// Invalid binary port version. #[error("binary protocol version mismatch")] BinaryProtocolVersionMismatch = 61, + /// Blockchain is empty + #[error("blockchain is empty")] + EmptyBlockchain = 62, + /// Expected deploy, but got transaction + #[error("expected deploy, got transaction")] + ExpectedDeploy = 63, + /// Expected transaction, but got deploy + #[error("expected transaction V1, got deploy")] + ExpectedTransaction = 64, + /// Transaction has expired + #[error("transaction has expired")] + TransactionExpired = 65, + /// Transaction parameters are missing or incorrect + #[error("missing or incorrect transaction parameters")] + MissingOrIncorrectParameters = 66, + /// No such addressable entity + #[error("no such addressable entity")] + NoSuchAddressableEntity = 67, + /// No such contract at hash + #[error("no such contract at hash")] + NoSuchContractAtHash = 68, + /// No such entry point + #[error("no such entry point")] + NoSuchEntryPoint = 69, + /// No such package at hash + #[error("no such package at hash")] + NoSuchPackageAtHash = 70, + /// Invalid entity at version + #[error("invalid entity at version")] + InvalidEntityAtVersion = 71, + /// Disabled entity at version + #[error("disabled entity at version")] + DisabledEntityAtVersion = 72, + /// Missing entity at version + #[error("missing entity at version")] + MissingEntityAtVersion = 73, + /// Invalid associated keys + #[error("invalid associated keys")] + InvalidAssociatedKeys = 74, + /// Insufficient signature weight + #[error("insufficient signature weight")] + InsufficientSignatureWeight = 75, + /// Insufficient balance + #[error("insufficient balance")] + InsufficientBalance = 76, + /// Unknown balance + #[error("unknown balance")] + UnknownBalance = 77, + /// Invalid payment variant for deploy + #[error("invalid payment variant for deploy")] + DeployInvalidPaymentVariant = 78, + /// Missing payment amount for deploy + #[error("missing payment amount for deploy")] + DeployMissingPaymentAmount = 79, + /// Failed to parse payment amount for deploy + #[error("failed to parse payment amount for deploy")] + DeployFailedToParsePaymentAmount = 80, + /// Missing transfer target for deploy + #[error("missing transfer target for deploy")] + DeployMissingTransferTarget = 81, + /// Missing module bytes for deploy + #[error("missing module bytes for deploy")] + DeployMissingModuleBytes = 82, + /// Entry point cannot be 'call' + #[error("entry point cannot be 'call'")] + InvalidTransactionEntryPointCannotBeCall = 83, + /// Invalid transaction kind + #[error("invalid transaction kind")] +
InvalidTransactionInvalidTransactionKind = 84, } impl TryFrom for ErrorCode { @@ -266,6 +339,30 @@ impl TryFrom for ErrorCode { 58 => Ok(ErrorCode::SwitchBlockNotFound), 59 => Ok(ErrorCode::SwitchBlockParentNotFound), 60 => Ok(ErrorCode::UnsupportedRewardsV1Request), + 61 => Ok(ErrorCode::BinaryProtocolVersionMismatch), + 62 => Ok(ErrorCode::EmptyBlockchain), + 63 => Ok(ErrorCode::ExpectedDeploy), + 64 => Ok(ErrorCode::ExpectedTransaction), + 65 => Ok(ErrorCode::TransactionExpired), + 66 => Ok(ErrorCode::MissingOrIncorrectParameters), + 67 => Ok(ErrorCode::NoSuchAddressableEntity), + 68 => Ok(ErrorCode::NoSuchContractAtHash), + 69 => Ok(ErrorCode::NoSuchEntryPoint), + 70 => Ok(ErrorCode::NoSuchPackageAtHash), + 71 => Ok(ErrorCode::InvalidEntityAtVersion), + 72 => Ok(ErrorCode::DisabledEntityAtVersion), + 73 => Ok(ErrorCode::MissingEntityAtVersion), + 74 => Ok(ErrorCode::InvalidAssociatedKeys), + 75 => Ok(ErrorCode::InsufficientSignatureWeight), + 76 => Ok(ErrorCode::InsufficientBalance), + 77 => Ok(ErrorCode::UnknownBalance), + 78 => Ok(ErrorCode::DeployInvalidPaymentVariant), + 79 => Ok(ErrorCode::DeployMissingPaymentAmount), + 80 => Ok(ErrorCode::DeployFailedToParsePaymentAmount), + 81 => Ok(ErrorCode::DeployMissingTransferTarget), + 82 => Ok(ErrorCode::DeployMissingModuleBytes), + 83 => Ok(ErrorCode::InvalidTransactionEntryPointCannotBeCall), + 84 => Ok(ErrorCode::InvalidTransactionInvalidTransactionKind), _ => Err(UnknownErrorCode), } } @@ -394,7 +491,35 @@ impl From for ErrorCode { InvalidTransactionV1::InvalidPricingMode { .. } => { ErrorCode::InvalidTransactionPricingMode } + InvalidTransactionV1::EntryPointCannotBeCall => { + ErrorCode::InvalidTransactionEntryPointCannotBeCall + } + InvalidTransactionV1::InvalidTransactionKind(_) => { + ErrorCode::InvalidTransactionInvalidTransactionKind + } _ => ErrorCode::InvalidTransactionUnspecified, } } } + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use strum::IntoEnumIterator; + + use crate::ErrorCode; + + #[test] + fn try_from_decoded_all_variants() { + for variant in ErrorCode::iter() { + let as_int = variant as u16; + let decoded = ErrorCode::try_from(as_int); + assert!( + decoded.is_ok(), + "variant {} not covered by TryFrom implementation", + as_int + ); + } + } +} diff --git a/binary_port/src/global_state_query_result.rs b/binary_port/src/global_state_query_result.rs index 6dfab35e76..009e5b60a5 100644 --- a/binary_port/src/global_state_query_result.rs +++ b/binary_port/src/global_state_query_result.rs @@ -11,9 +11,10 @@ use casper_types::testing::TestRng; #[cfg(test)] use casper_types::{ByteCode, ByteCodeKind}; +use serde::Serialize; /// Carries the successful result of the global state query. -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct GlobalStateQueryResult { /// Stored value. value: StoredValue, diff --git a/binary_port/src/information_request.rs b/binary_port/src/information_request.rs index a4057b5ad5..d389525d01 100644 --- a/binary_port/src/information_request.rs +++ b/binary_port/src/information_request.rs @@ -62,6 +62,8 @@ pub enum InformationRequest { /// If `None`, the reward for the validator is returned. delegator: Option>, }, + /// Returns the current Casper protocol version. + ProtocolVersion, } impl InformationRequest { @@ -91,6 +93,7 @@ impl InformationRequest { InformationRequestTag::LatestSwitchBlockHeader } InformationRequest::Reward { .. 
} => InformationRequestTag::Reward, + InformationRequest::ProtocolVersion => InformationRequestTag::ProtocolVersion, } } @@ -131,6 +134,7 @@ impl InformationRequest { validator: PublicKey::random(rng).into(), delegator: rng.gen::().then(|| PublicKey::random(rng).into()), }, + InformationRequestTag::ProtocolVersion => InformationRequest::ProtocolVersion, } } } @@ -169,7 +173,8 @@ impl ToBytes for InformationRequest { | InformationRequest::ConsensusStatus | InformationRequest::ChainspecRawBytes | InformationRequest::NodeStatus - | InformationRequest::LatestSwitchBlockHeader => Ok(()), + | InformationRequest::LatestSwitchBlockHeader + | InformationRequest::ProtocolVersion => Ok(()), InformationRequest::Reward { era_identifier, validator, @@ -207,7 +212,8 @@ impl ToBytes for InformationRequest { | InformationRequest::ConsensusStatus | InformationRequest::ChainspecRawBytes | InformationRequest::NodeStatus - | InformationRequest::LatestSwitchBlockHeader => 0, + | InformationRequest::LatestSwitchBlockHeader + | InformationRequest::ProtocolVersion => 0, InformationRequest::Reward { era_identifier, validator, @@ -283,6 +289,9 @@ impl TryFrom<(InformationRequestTag, &[u8])> for InformationRequest { remainder, ) } + InformationRequestTag::ProtocolVersion => { + (InformationRequest::ProtocolVersion, key_bytes) + } }; if !remainder.is_empty() { return Err(bytesrepr::Error::LeftOverBytes); @@ -340,12 +349,14 @@ pub enum InformationRequestTag { LatestSwitchBlockHeader = 15, /// Reward for a validator or a delegator in a specific era. Reward = 16, + /// Protocol version request. + ProtocolVersion = 17, } impl InformationRequestTag { #[cfg(test)] pub(crate) fn random(rng: &mut TestRng) -> Self { - match rng.gen_range(0..16) { + match rng.gen_range(0..18) { 0 => InformationRequestTag::BlockHeader, 1 => InformationRequestTag::SignedBlock, 2 => InformationRequestTag::Transaction, @@ -363,6 +374,7 @@ impl InformationRequestTag { 14 => InformationRequestTag::NodeStatus, 15 => InformationRequestTag::LatestSwitchBlockHeader, 16 => InformationRequestTag::Reward, + 17 => InformationRequestTag::ProtocolVersion, _ => unreachable!(), } } @@ -390,6 +402,7 @@ impl TryFrom for InformationRequestTag { 14 => Ok(InformationRequestTag::NodeStatus), 15 => Ok(InformationRequestTag::LatestSwitchBlockHeader), 16 => Ok(InformationRequestTag::Reward), + 17 => Ok(InformationRequestTag::ProtocolVersion), _ => Err(UnknownInformationRequestTag(value)), } } diff --git a/binary_port/src/node_status.rs b/binary_port/src/node_status.rs index f0d8a8c36f..3666412df7 100644 --- a/binary_port/src/node_status.rs +++ b/binary_port/src/node_status.rs @@ -1,19 +1,22 @@ use casper_types::{ bytesrepr::{self, FromBytes, ToBytes}, - AvailableBlockRange, BlockHash, BlockSynchronizerStatus, Digest, NextUpgrade, Peers, PublicKey, - TimeDiff, Timestamp, + AvailableBlockRange, BlockHash, BlockSynchronizerStatus, Digest, NextUpgrade, Peers, + ProtocolVersion, PublicKey, TimeDiff, Timestamp, }; #[cfg(test)] use casper_types::testing::TestRng; #[cfg(test)] use rand::Rng; +use serde::Serialize; use crate::{minimal_block_info::MinimalBlockInfo, type_wrappers::ReactorStateName}; /// Status information about the node. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Serialize)] pub struct NodeStatus { + /// The current protocol version. + pub protocol_version: ProtocolVersion, /// The node ID and network address of each connected peer. pub peers: Peers, /// The compiled node version. 
@@ -48,6 +51,7 @@ impl NodeStatus { #[cfg(test)] pub(crate) fn random(rng: &mut TestRng) -> Self { Self { + protocol_version: ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()), peers: Peers::random(rng), build_version: rng.random_string(5..10), chainspec_name: rng.random_string(5..10), @@ -70,7 +74,8 @@ impl NodeStatus { impl FromBytes for NodeStatus { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (peers, remainder) = FromBytes::from_bytes(bytes)?; + let (protocol_version, remainder) = ProtocolVersion::from_bytes(bytes)?; + let (peers, remainder) = Peers::from_bytes(remainder)?; let (build_version, remainder) = String::from_bytes(remainder)?; let (chainspec_name, remainder) = String::from_bytes(remainder)?; let (starting_state_root_hash, remainder) = Digest::from_bytes(remainder)?; @@ -86,6 +91,7 @@ impl FromBytes for NodeStatus { let (latest_switch_block_hash, remainder) = Option::::from_bytes(remainder)?; Ok(( NodeStatus { + protocol_version, peers, build_version, chainspec_name, @@ -115,6 +121,7 @@ impl ToBytes for NodeStatus { fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { let NodeStatus { + protocol_version, peers, build_version, chainspec_name, @@ -130,6 +137,7 @@ impl ToBytes for NodeStatus { block_sync, latest_switch_block_hash, } = self; + protocol_version.write_bytes(writer)?; peers.write_bytes(writer)?; build_version.write_bytes(writer)?; chainspec_name.write_bytes(writer)?; @@ -147,7 +155,8 @@ impl ToBytes for NodeStatus { } fn serialized_length(&self) -> usize { - self.peers.serialized_length() + self.protocol_version.serialized_length() + + self.peers.serialized_length() + self.build_version.serialized_length() + self.chainspec_name.serialized_length() + self.starting_state_root_hash.serialized_length() diff --git a/binary_port/src/payload_type.rs b/binary_port/src/payload_type.rs index 268f784be0..51000d7300 100644 --- a/binary_port/src/payload_type.rs +++ b/binary_port/src/payload_type.rs @@ -13,7 +13,7 @@ use casper_types::{ execution::{ExecutionResult, ExecutionResultV1}, AvailableBlockRange, BlockBody, BlockBodyV1, BlockHeader, BlockHeaderV1, BlockSignatures, BlockSignaturesV1, BlockSynchronizerStatus, ChainspecRawBytes, Deploy, NextUpgrade, Peers, - SignedBlock, StoredValue, Transaction, Transfer, + ProtocolVersion, SignedBlock, StoredValue, Transaction, Transfer, }; use crate::{ @@ -108,6 +108,8 @@ pub enum PayloadType { BalanceResponse, /// Reward response. Reward, + /// Protocol version. 
+ ProtocolVersion, } impl PayloadType { @@ -199,6 +201,7 @@ impl TryFrom for PayloadType { x if x == PayloadType::WasmV1Result as u8 => Ok(PayloadType::WasmV1Result), x if x == PayloadType::BalanceResponse as u8 => Ok(PayloadType::BalanceResponse), x if x == PayloadType::Reward as u8 => Ok(PayloadType::Reward), + x if x == PayloadType::ProtocolVersion as u8 => Ok(PayloadType::ProtocolVersion), _ => Err(()), } } @@ -253,6 +256,7 @@ impl fmt::Display for PayloadType { PayloadType::DictionaryQueryResult => write!(f, "DictionaryQueryResult"), PayloadType::BalanceResponse => write!(f, "BalanceResponse"), PayloadType::Reward => write!(f, "Reward"), + PayloadType::ProtocolVersion => write!(f, "ProtocolVersion"), } } } @@ -391,6 +395,10 @@ impl PayloadEntity for RewardResponse { const PAYLOAD_TYPE: PayloadType = PayloadType::Reward; } +impl PayloadEntity for ProtocolVersion { + const PAYLOAD_TYPE: PayloadType = PayloadType::ProtocolVersion; +} + #[cfg(test)] mod tests { use super::*; diff --git a/binary_port/src/type_wrappers.rs b/binary_port/src/type_wrappers.rs index c933226bf5..26fddbd99a 100644 --- a/binary_port/src/type_wrappers.rs +++ b/binary_port/src/type_wrappers.rs @@ -6,8 +6,10 @@ use datasize::DataSize; use casper_types::{ bytesrepr::{self, Bytes, FromBytes, ToBytes}, + system::auction::DelegationRate, EraId, ExecutionInfo, Key, PublicKey, TimeDiff, Timestamp, Transaction, ValidatorChange, U512, }; +use serde::Serialize; use super::GlobalStateQueryResult; @@ -39,7 +41,7 @@ macro_rules! impl_bytesrepr_for_type_wrapper { } /// Type representing uptime. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] pub struct Uptime(u64); impl Uptime { @@ -69,7 +71,7 @@ impl TryFrom for TimeDiff { } /// Type representing changes in consensus validators. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Serialize)] #[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ConsensusValidatorChanges(BTreeMap>); @@ -92,7 +94,7 @@ impl From for BTreeMap for String { } /// Type representing the reactor state name. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Serialize)] pub struct ReactorStateName(String); impl ReactorStateName { @@ -136,7 +138,7 @@ impl From for String { } /// Type representing last progress of the sync process. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Serialize)] pub struct LastProgress(Timestamp); impl LastProgress { @@ -174,16 +176,21 @@ impl GetTrieFullResult { } /// Type representing the reward of a validator or a delegator. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Serialize)] pub struct RewardResponse { amount: U512, era_id: EraId, + delegation_rate: DelegationRate, } impl RewardResponse { /// Constructs new reward response. - pub fn new(amount: U512, era_id: EraId) -> Self { - Self { amount, era_id } + pub fn new(amount: U512, era_id: EraId, delegation_rate: DelegationRate) -> Self { + Self { + amount, + era_id, + delegation_rate, + } } /// Returns the amount of the reward. @@ -195,6 +202,11 @@ impl RewardResponse { pub fn era_id(&self) -> EraId { self.era_id } + + /// Returns the delegation rate of the validator. 
+ pub fn delegation_rate(&self) -> DelegationRate { + self.delegation_rate + } } impl ToBytes for RewardResponse { @@ -205,12 +217,15 @@ impl ToBytes for RewardResponse { } fn serialized_length(&self) -> usize { - self.amount.serialized_length() + self.era_id.serialized_length() + self.amount.serialized_length() + + self.era_id.serialized_length() + + self.delegation_rate.serialized_length() } fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { self.amount.write_bytes(writer)?; - self.era_id.write_bytes(writer) + self.era_id.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer) } } @@ -218,12 +233,16 @@ impl FromBytes for RewardResponse { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { let (amount, remainder) = FromBytes::from_bytes(bytes)?; let (era_id, remainder) = FromBytes::from_bytes(remainder)?; - Ok((RewardResponse::new(amount, era_id), remainder)) + let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + RewardResponse::new(amount, era_id, delegation_rate), + remainder, + )) } } /// Describes the consensus status. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Serialize)] pub struct ConsensusStatus { validator_public_key: PublicKey, round_length: Option, @@ -278,7 +297,7 @@ impl FromBytes for ConsensusStatus { } /// A transaction with execution info. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Serialize)] pub struct TransactionWithExecutionInfo { transaction: Transaction, execution_info: Option, @@ -435,6 +454,7 @@ mod tests { bytesrepr::test_serialization_roundtrip(&RewardResponse::new( rng.gen(), EraId::random(rng), + rng.gen(), )); } diff --git a/ci/nightly-test.sh b/ci/nightly-test.sh index 92861ac3ed..dfe3b3f1f4 100755 --- a/ci/nightly-test.sh +++ b/ci/nightly-test.sh @@ -130,6 +130,7 @@ run_test_and_count 'start_run_teardown "swap_validator_set.sh"' run_test_and_count 'start_run_teardown "sync_upgrade_test.sh node=6 era=5 timeout=500"' run_test_and_count 'start_run_teardown "validators_disconnect.sh"' run_test_and_count 'start_run_teardown "event_stream.sh"' +run_test_and_count 'start_run_teardown "regression_4771.sh"' # Without start_run_teardown - these ones perform their own assets setup, network start and teardown run_test_and_count 'source "$SCENARIOS_DIR/upgrade_after_emergency_upgrade_test_pre_1.5.sh"' run_test_and_count 'source "$SCENARIOS_DIR/regression_3976.sh"' diff --git a/execution_engine/src/execution/executor.rs b/execution_engine/src/execution/executor.rs index 381707b6ab..6df0f3ac6b 100644 --- a/execution_engine/src/execution/executor.rs +++ b/execution_engine/src/execution/executor.rs @@ -6,9 +6,10 @@ use casper_storage::{ AddressGenerator, }; use casper_types::{ - account::AccountHash, addressable_entity::NamedKeys, execution::Effects, AddressableEntity, - AddressableEntityHash, BlockTime, ContextAccessRights, EntryPointType, Gas, Key, Phase, - ProtocolVersion, RuntimeArgs, StoredValue, Tagged, TransactionHash, U512, + account::AccountHash, addressable_entity::NamedKeys, contract_messages::Messages, + execution::Effects, AddressableEntity, AddressableEntityHash, BlockTime, ContextAccessRights, + EntryPointType, Gas, Key, Phase, ProtocolVersion, RuntimeArgs, StoredValue, Tagged, + TransactionHash, U512, }; use crate::{ @@ -130,19 +131,24 @@ impl Executor { } }; - let err = match result { - Ok(_) => None, - Err(error) => Some(error.into()), - }; - - return WasmV1Result::new( - gas_limit, - runtime.context().gas_counter(), - 
runtime.context().effects(), - runtime.context().transfers().to_owned(), - runtime.context().messages(), - err, - ); + match result { + Ok(_) => WasmV1Result::new( + gas_limit, + runtime.context().gas_counter(), + runtime.context().effects(), + runtime.context().transfers().to_owned(), + runtime.context().messages(), + None, + ), + Err(error) => WasmV1Result::new( + gas_limit, + runtime.context().gas_counter(), + Effects::new(), + vec![], + Messages::new(), + Some(error.into()), + ), + } } /// Creates new runtime context. diff --git a/execution_engine/src/runtime/mint_internal.rs b/execution_engine/src/runtime/mint_internal.rs index 482ed85d5b..e3981eafb8 100644 --- a/execution_engine/src/runtime/mint_internal.rs +++ b/execution_engine/src/runtime/mint_internal.rs @@ -189,4 +189,19 @@ where } } -impl<'a, R> Mint for Runtime<'a, R> where R: StateReader {} +impl<'a, R> Mint for Runtime<'a, R> +where + R: StateReader, +{ + fn purse_exists(&mut self, uref: URef) -> Result { + let maybe_value = self + .context + .read_gs(&Key::Balance(uref.addr())) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage))?; + match maybe_value { + Some(StoredValue::CLValue(value)) => Ok(*value.cl_type() == U512::cl_type()), + Some(_non_cl_value) => Err(Error::CLValue), + None => Ok(false), + } + } +} diff --git a/execution_engine/src/runtime/mod.rs b/execution_engine/src/runtime/mod.rs index cd17af7512..035c828a95 100644 --- a/execution_engine/src/runtime/mod.rs +++ b/execution_engine/src/runtime/mod.rs @@ -2639,7 +2639,7 @@ where Ok(()) => { let protocol_version = self.context.protocol_version(); let byte_code_hash = ByteCodeHash::default(); - let entity_hash = AddressableEntityHash::new(self.context.new_hash_address()?); + let entity_hash = AddressableEntityHash::new(target.value()); let package_hash = PackageHash::new(self.context.new_hash_address()?); let main_purse = target_purse; let associated_keys = AssociatedKeys::new(target, Weight::new(1)); diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs index a8c25a30ad..3c13e92b53 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs @@ -824,7 +824,8 @@ fn should_forcibly_undelegate_after_setting_validator_limits() { builder.forced_undelegate(None, DEFAULT_PROTOCOL_VERSION, DEFAULT_BLOCK_TIME); let bids = builder.get_bids(); - assert_eq!(bids.len(), 1); + // The undelegation itself doesn't remove bids, only process_unbond does. + assert_eq!(bids.len(), 3); assert!(builder.get_validator_weights(new_era + 1).is_none()); @@ -839,7 +840,10 @@ fn should_forcibly_undelegate_after_setting_validator_limits() { assert_eq!( *validator_weights.get(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(), - U512::from(ADD_BID_AMOUNT_1 + 1_000) + // The validator has now bid ADD_BID_AMOUNT_1 + 1_000. + // Delegator 1's delegation has been decreased to the maximum of DELEGATE_AMOUNT_1 - 1_000. + // Delegator 2's delegation was below minimum, so it has been completely unbonded. + U512::from(ADD_BID_AMOUNT_1 + 1_000 + DELEGATE_AMOUNT_1 - 1_000) ); let unbonding_purses: UnbondingPurses = builder.get_unbonds(); @@ -5536,9 +5540,10 @@ fn credits_are_considered_when_determining_validators() { // Add a credit for node 1 artificially (assume it has proposed a block with a transaction and // received credit). 
+ let credit_amount = U512::from(2001); let add_credit = HandleFeeMode::credit( Box::new(ACCOUNT_1_PK.clone()), - U512::from(2001), + credit_amount, INITIAL_ERA_ID, ); builder.handle_fee( @@ -5566,8 +5571,9 @@ fn credits_are_considered_when_determining_validators() { Some(&U512::from(ACCOUNT_2_BOND)) ); assert!(!new_validator_weights.contains_key(&BID_ACCOUNT_1_PK)); + let expected_amount = credit_amount.saturating_add(U512::from(ACCOUNT_1_BOND)); assert_eq!( new_validator_weights.get(&ACCOUNT_1_PK), - Some(&U512::from(ACCOUNT_1_BOND)) + Some(&expected_amount) ); } diff --git a/execution_engine_testing/tests/src/test/system_contracts/genesis.rs b/execution_engine_testing/tests/src/test/system_contracts/genesis.rs index 32e16308dc..3811d02a19 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/genesis.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/genesis.rs @@ -78,6 +78,12 @@ fn should_run_genesis() { .get_entity_by_account_hash(PublicKey::System.to_account_hash()) .expect("system account should exist"); + let account_1_addr = builder + .get_entity_hash_by_account_hash(*ACCOUNT_1_ADDR) + .expect("must get addr for entity account 1"); + + assert_eq!(account_1_addr.value(), ACCOUNT_1_ADDR.value()); + let account_1 = builder .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("account 1 should exist"); diff --git a/execution_engine_testing/tests/src/test/upgrade.rs b/execution_engine_testing/tests/src/test/upgrade.rs index 22e0fc7e12..873459a7a8 100644 --- a/execution_engine_testing/tests/src/test/upgrade.rs +++ b/execution_engine_testing/tests/src/test/upgrade.rs @@ -986,6 +986,16 @@ fn call_and_migrate_purse_holder_contract(migration_scenario: MigrationScenario) .map(PackageHash::new) .unwrap(); + // There is only one version present, post migration there should also + // be only one. + let version_count = builder + .get_package(package_hash) + .expect("must have package") + .versions() + .version_count(); + + assert_eq!(version_count, 1usize); + let execute_request = match migration_scenario { MigrationScenario::ByPackageName(maybe_contract_version) => { ExecuteRequestBuilder::versioned_contract_call_by_name( @@ -1057,6 +1067,15 @@ fn call_and_migrate_purse_holder_contract(migration_scenario: MigrationScenario) if let MigrationScenario::ByUpgrader = migration_scenario { let expect_associated_keys = AssociatedKeys::new(*DEFAULT_ACCOUNT_ADDR, Weight::new(1)); assert_eq!(actual_associated_keys, &expect_associated_keys); + // Post migration by upgrade there should be previous + 1 versions + // present in the package. (previous = 1) + let version_count = builder + .get_package(package_hash) + .expect("must have package") + .versions() + .version_count(); + + assert_eq!(version_count, 2usize); } else { assert_eq!(actual_associated_keys, &AssociatedKeys::default()); } diff --git a/node/BINARY_PORT_PROTOCOL.md b/node/BINARY_PORT_PROTOCOL.md index 064466cbc3..d1fc124cd8 100644 --- a/node/BINARY_PORT_PROTOCOL.md +++ b/node/BINARY_PORT_PROTOCOL.md @@ -1,58 +1,58 @@ -# Binary port protocol -The specification of the protocol used to communicate between the RPC sidecar and binary port casper-node. +# The Binary Port Protocol +This page specifies the communication protocol between the [RPC Sidecar](https://github.com/casper-network/casper-sidecar) and a Casper node's binary port. ## Synopsis -This is a binary protocol which follows a simple request-response model. 
The protocol consists of one party (the client) sending requests to another party (the server) and the server sending responses back to the client. Both requests and responses are wrapped in envelopes containing a version and a payload type tag. The versioning scheme is based on [SemVer](https://semver.org/), see [versioning](#versioning) for more details. The payload type tags are used to interpret the contents of the payloads. +The communication protocol between the Sidecar and the binary port is a binary protocol that follows a simple request-response model. The protocol consists of one party (the client) sending requests to another party (the server) and the server sending responses back to the client. Both requests and responses are wrapped in envelopes containing a version and a payload type tag. The versioning scheme is based on [SemVer](https://semver.org/). See [versioning](#versioning) for more details. The payload type tags are used to interpret the contents of the payloads. ### Request format | Size in bytes | Field | Description | |---------------|-----------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 2 | Version of the binary port header | Version of the binary port header serialized as a single u16 number. Upon receiving the request, binary port component will first read data from this field and check it against the currently supported version. In case of version mismatch the appropriate error response will be sent. | +| 2 | Version of the binary port header | Version of the binary port header serialized as a single u16 number. Upon receiving the request, the binary port component will first read data from this field and check it against the currently supported version. In case of a version mismatch, the appropriate error response will be sent. | | 12 | Chain protocol version | Chain protocol version as a u32 triplet (major, minor, patch). This parameter is used to determine whether an incoming request is compatible (according to semver rules) with the current chain protocol version. If not, the appropriate error response will be sent. | | 1 | BinaryRequestTag | Tag identifying the request. | -| ... | RequestPayload | Payload to be interpreted according to `BinaryRequestTag`. | +| Variable | RequestPayload | Payload to be interpreted according to the `BinaryRequestTag`. | -Request bytes can be constructed from bytesrepr-serialized `BinaryRequestHeader` followed by bytesrepr-serialized `BinaryRequest`. +Request bytes can be constructed from the bytesrepr-serialized `BinaryRequestHeader` followed by the bytesrepr-serialized `BinaryRequest`. ### Response format | Size in bytes | Field | Description | |-----------------|-----------------|--------------------------------------------------------------------------| -| 2 | Request id | Request id (u16). | -| 4 | LengthOfRequest | Length of the request (encoded as bytes) being responded to. | -| LengthOfRequest | RequestBytes | The request being responded to encoded as bytes. | +| 2 | Request ID | Request ID as a u16 number. | +| 4 | LengthOfRequest | Length of the request (encoded as bytes) for this response. | +| LengthOfRequest | RequestBytes | The request, encoded as bytes, corresponding to this response. 
| | 12 | ProtocolVersion | Protocol version as a u32 triplet (major, minor, patch). | | 2 | ErrorCode | Error code, where 0 indicates success. | | 1-2 | PayloadType | Optional payload type tag (first byte being 1 indicates that it exists). | -| ... | Payload | Payload to be interpreted according to `PayloadTag`. | +| Variable | Payload | Payload to be interpreted according to the `PayloadTag`. | `BinaryResponseAndRequest` object can be bytesrepr-deserialized from these bytes. -**Notes:** `...` means that the payload size is variable in size and depends on the tag. +**Notes:** `Variable` means that the payload size is variable and depends on the tag. ## Versioning -Versioning is based on the protocol version of the Casper Platform and the request/response model was designed to support **backwards-compatible** changes to some parts of it. These are allowed to change between **MINOR** versions: -- addition of new [`BinaryRequestTag`](#request-format) with its own payload -- addition of new [`PayloadType`](#response-format) with its own payload -- addition of new [`RecordId`](#request-model-details) -- addition of new [`InformationRequestTag`](#request-model-details) -- addition of new [`ErrorCode`](#response-format) +Versioning is based on the protocol version of the Casper Platform. The request/response model was designed to support **backward-compatible** changes to some parts, which are allowed to change between **MINOR** versions: +- addition of a new [`BinaryRequestTag`](#request-format) with its own payload +- addition of a new [`PayloadType`](#response-format) with its own payload +- addition of a new [`RecordId`](#request-model-details) +- addition of a new [`InformationRequestTag`](#request-model-details) +- addition of a new [`ErrorCode`](#response-format) -Implementations of the protocol can handle requests/responses with a different **MINOR** version than their own. It is possible that they receive a payload they don't support if their version is lower. In that case they should respond with an error code indicating the lack of support for the given payload (`ErrorCode::UnsupportedRequest`). +Implementations of the protocol can handle requests/responses with a different **MINOR** version than their own. It is possible that they receive a payload they don't support if their version is lower. In that case, they should respond with an error code indicating the lack of support for the given payload (`ErrorCode::UnsupportedRequest`). -Other changes to the protocol such as changes to the format of existing requests/responses or removal of existing requests/responses are only allowed between **MAJOR** versions. Implementations of the protocol should not handle requests/responses with a different **MAJOR** version than their own and immediately respond with an error code indicating the lack of support for the given version (`ErrorCode::UnsupportedRequest`). +Other changes to the protocol, such as changes to the format of existing requests/responses or removal of existing requests/responses, are only allowed between **MAJOR** versions. Implementations of the protocol should not handle requests/responses with a different **MAJOR** version than their own and immediately respond with an error code indicating the lack of support for the given version (`ErrorCode::UnsupportedRequest`). -Changes to the envelopes (the request/response headers) are allowed, but are breaking. 
When such a change is required, the "Header version" in the request header should also be changed to prevent the binary port from trying to handle requests it can't process. ## Request model details -There are currently 3 supported types of requests, but the request model can be extended with new variants according to the [versioning](#versioning) rules. The request types are: -- `Get` request, which is one of: - - `Record` request asking for a record with an [**extensible**](#versioning) `RecordId` tag and a key - - `Information` request asking for a piece of information with an [**extensible**](#versioning) `InformationRequestTag` tag and a key - - `State` request asking for some data from the global state - - `Item` request asking for a single item by a `Key` - - `AllItems` request asking for all items by a `KeyTag` - - `Trie` request asking for a trie by a `Digest` -- `TryAcceptTransaction` request a transaction to be accepted and executed -- `TrySpeculativeExec` request a transaction to be speculatively executed +Currently, there are 3 supported types of requests, but the request model can be extended with new variants according to the [versioning](#versioning) rules. The request types are: +- A `Get` request, which is one of: + - A `Record` request asking for a record with an [**extensible**](#versioning) `RecordId` tag and a key + - An `Information` request asking for a piece of information with an [**extensible**](#versioning) `InformationRequestTag` tag and a key + - A `State` request asking for some data from global state. This can be: + - An `Item` request asking for a single item given a `Key` + - An `AllItems` request asking for all items given a `KeyTag` + - A `Trie` request asking for a trie given a `Digest` +- A `TryAcceptTransaction` request for a transaction to be accepted and executed +- A `TrySpeculativeExec` request for a transaction to be executed speculatively, without saving the transaction effects in global state diff --git a/node/CHANGELOG.md b/node/CHANGELOG.md index f4ffe98364..8766af4e21 100644 --- a/node/CHANGELOG.md +++ b/node/CHANGELOG.md @@ -9,24 +9,24 @@ All notable changes to this project will be documented in this file. The format [comment]: <> (Fixed: any bug fixes) [comment]: <> (Security: in case of vulnerabilities) - - ## Unreleased (2.0.0) ### Added * Add `BinaryPort` interface along with the relevant config entries. +* Added chainspec settings `finders_fee`, `finality_signature_proportion` and `signature_rewards_max_delay` to control behavior of the new seigniorage model. ### Changed * All SSE events are emitted via the `/events` endpoint. None of the previous ones (`/events/main`, `/events/deploys`, and `/events/sigs`) is available any longer. * `DeployBuffer` was renamed to `TransactionBuffer` along with the related metrics. +* Switch blocks and the creation and propagation of signatures on them are now rewarded. +* Era end reports now record rewards as motes rather than scores. +* Seigniorage model is now independent of the details of consensus (and compatible with both Highway and Zug) and based solely upon block proposals, signature generation and signature distribution by validators. ### Removed * Remove the JSON-RPC and speculative execution interfaces.
* Remove chainspec setting `highway.performance_meter.blocks_to_consider` and the entire `highway.performance_meter` section. * Remove chainspec setting `highway.reduced_reward_multiplier` - - ## 1.5.6 ### Changed @@ -34,15 +34,11 @@ All notable changes to this project will be documented in this file. The format * If an upgrade with the same activation point as the current one is detected on startup, the node will immediately shut down for upgrade. * Reduce chainspec setting `deploys.max_ttl` from 18 hours to 2 hours. - - ## 1.5.5 ### Added * New chainspec setting `highway.performance_meter.blocks_to_consider` with a value of 10, meaning that nodes will take 10 most recent blocks into account when determining their performance in Highway for the purpose of choosing their round lengths. - - ## 1.5.4 ### Added diff --git a/node/src/components/binary_port.rs b/node/src/components/binary_port.rs index 38649e382f..2515f9cfb6 100644 --- a/node/src/components/binary_port.rs +++ b/node/src/components/binary_port.rs @@ -146,6 +146,10 @@ where BinaryRequest::TrySpeculativeExec { transaction } => { metrics.binary_port_try_speculative_exec_count.inc(); if !config.allow_request_speculative_exec { + debug!( + hash = %transaction.hash(), + "received a request for speculative execution while the feature is disabled" + ); return BinaryResponse::new_error(ErrorCode::FunctionDisabled, protocol_version); } let response = @@ -198,6 +202,7 @@ where } if RecordId::try_from(record_type_tag) == Ok(RecordId::Transfer) => { metrics.binary_port_get_record_count.inc(); let Ok(block_hash) = bytesrepr::deserialize_from_slice(&key) else { + debug!("received an incorrectly serialized key for a transfer record"); return BinaryResponse::new_error(ErrorCode::BadRequest, protocol_version); }; let Some(transfers) = effect_builder @@ -236,12 +241,19 @@ where GetRequest::Information { info_type_tag, key } => { metrics.binary_port_get_info_count.inc(); let Ok(tag) = InformationRequestTag::try_from(info_type_tag) else { + debug!( + tag = info_type_tag, + "received an unknown information request tag" + ); return BinaryResponse::new_error(ErrorCode::UnsupportedRequest, protocol_version); }; - let Ok(req) = InformationRequest::try_from((tag, &key[..])) else { - return BinaryResponse::new_error(ErrorCode::BadRequest, protocol_version); - }; - handle_info_request(req, effect_builder, protocol_version).await + match InformationRequest::try_from((tag, &key[..])) { + Ok(req) => handle_info_request(req, effect_builder, protocol_version).await, + Err(error) => { + debug!(?tag, %error, "failed to parse an information request"); + BinaryResponse::new_error(ErrorCode::BadRequest, protocol_version) + } + } } GetRequest::State(req) => { metrics.binary_port_get_state_count.inc(); @@ -261,7 +273,7 @@ where { let Some(state_root_hash) = resolve_state_root_hash(effect_builder, state_identifier).await else { - return BinaryResponse::new_empty(protocol_version); + return BinaryResponse::new_error(ErrorCode::RootNotFound, protocol_version); }; let storage_key_prefix = match key_prefix { KeyPrefix::DelegatorBidAddrsByValidator(hash) => { @@ -287,7 +299,8 @@ where PrefixedValuesResult::RootNotFound => { BinaryResponse::new_error(ErrorCode::RootNotFound, protocol_version) } - PrefixedValuesResult::Failure(_err) => { + PrefixedValuesResult::Failure(error) => { + debug!(%error, "failed when querying for values by prefix"); BinaryResponse::new_error(ErrorCode::InternalError, protocol_version) } } @@ -304,7 +317,7 @@ where { let Some(state_root_hash) = 
resolve_state_root_hash(effect_builder, state_identifier).await else { - return BinaryResponse::new_empty(protocol_version); + return BinaryResponse::new_error(ErrorCode::RootNotFound, protocol_version); }; let request = TaggedValuesRequest::new(state_root_hash, TaggedValuesSelection::All(key_tag)); match effect_builder.get_tagged_values(request).await { @@ -314,7 +327,8 @@ where TaggedValuesResult::RootNotFound => { BinaryResponse::new_error(ErrorCode::RootNotFound, protocol_version) } - TaggedValuesResult::Failure(_err) => { + TaggedValuesResult::Failure(error) => { + debug!(%error, "failed when querying for all values by tag"); BinaryResponse::new_error(ErrorCode::InternalError, protocol_version) } } @@ -342,7 +356,7 @@ where let Some(state_root_hash) = resolve_state_root_hash(effect_builder, state_identifier).await else { - return BinaryResponse::new_empty(protocol_version); + return BinaryResponse::new_error(ErrorCode::RootNotFound, protocol_version); }; match get_global_state_item(effect_builder, state_root_hash, base_key, path).await { Ok(Some(result)) => BinaryResponse::from_value(result, protocol_version), @@ -355,6 +369,7 @@ where key_tag, } => { if !config.allow_request_get_all_values { + debug!(%key_tag, "received a request for items by key tag while the feature is disabled"); BinaryResponse::new_error(ErrorCode::FunctionDisabled, protocol_version) } else { handle_get_all_items(state_identifier, key_tag, effect_builder, protocol_version) @@ -363,6 +378,7 @@ where } GlobalStateRequest::Trie { trie_key } => { if !config.allow_request_get_trie { + debug!(%trie_key, "received a trie request while the feature is disabled"); BinaryResponse::new_error(ErrorCode::FunctionDisabled, protocol_version) } else { let req = TrieRequest::new(trie_key, None); @@ -371,7 +387,8 @@ where GetTrieFullResult::new(result.map(TrieRaw::into_inner)), protocol_version, ), - Err(_err) => { + Err(error) => { + debug!(%error, "failed when querying for a trie"); BinaryResponse::new_error(ErrorCode::InternalError, protocol_version) } } @@ -384,7 +401,7 @@ where let Some(state_root_hash) = resolve_state_root_hash(effect_builder, state_identifier).await else { - return BinaryResponse::new_empty(protocol_version); + return BinaryResponse::new_error(ErrorCode::RootNotFound, protocol_version); }; let result = match identifier { DictionaryItemIdentifier::AccountNamedKey { @@ -501,9 +518,19 @@ where let named_keys = match &*value { StoredValue::Account(account) => account.named_keys(), StoredValue::Contract(contract) => contract.named_keys(), - _ => return Err(ErrorCode::DictionaryURefNotFound), + value => { + debug!( + value_type = value.type_name(), + "unexpected stored value found when querying for a dictionary" + ); + return Err(ErrorCode::DictionaryURefNotFound); + } }; let Some(uref) = named_keys.get(&dictionary_name).and_then(Key::as_uref) else { + debug!( + dictionary_name, + "dictionary seed URef not found in named keys" + ); return Err(ErrorCode::DictionaryURefNotFound); }; let key = Key::dictionary(*uref, dictionary_item_key.as_bytes()); @@ -515,10 +542,18 @@ where Ok(Some(DictionaryQueryResult::new(key, query_result))) } - QueryResult::RootNotFound | QueryResult::ValueNotFound(_) => { + QueryResult::RootNotFound => { + debug!("root not found when querying for a dictionary seed URef"); + Err(ErrorCode::DictionaryURefNotFound) + } + QueryResult::ValueNotFound(error) => { + debug!(%error, "value not found when querying for a dictionary seed URef"); Err(ErrorCode::DictionaryURefNotFound) } - 
QueryResult::Failure(_) => Err(ErrorCode::FailedQuery), + QueryResult::Failure(error) => { + debug!(%error, "failed when querying for a dictionary seed URef"); + Err(ErrorCode::FailedQuery) + } } } @@ -538,11 +573,25 @@ where let req = QueryRequest::new(state_root_hash, Key::NamedKey(key_addr), vec![]); match effect_builder.query_global_state(req).await { QueryResult::Success { value, .. } => { - let StoredValue::NamedKey(key_val) = &*value else { - return Err(ErrorCode::DictionaryURefNotFound); + let key_val = match &*value { + StoredValue::NamedKey(key_val) => key_val, + value => { + debug!( + value_type = value.type_name(), + "unexpected stored value found when querying for a dictionary" + ); + return Err(ErrorCode::DictionaryURefNotFound); + } }; - let Ok(Key::URef(uref)) = key_val.get_key() else { - return Err(ErrorCode::DictionaryURefNotFound); + let uref = match key_val.get_key() { + Ok(Key::URef(uref)) => uref, + result => { + debug!( + ?result, + "unexpected named key result when querying for a dictionary" + ); + return Err(ErrorCode::DictionaryURefNotFound); + } }; let key = Key::dictionary(uref, dictionary_item_key.as_bytes()); let Some(query_result) = @@ -552,10 +601,18 @@ where }; Ok(Some(DictionaryQueryResult::new(key, query_result))) } - QueryResult::RootNotFound | QueryResult::ValueNotFound(_) => { + QueryResult::RootNotFound => { + debug!("root not found when querying for a dictionary seed URef"); Err(ErrorCode::DictionaryURefNotFound) } - QueryResult::Failure(_) => Err(ErrorCode::FailedQuery), + QueryResult::ValueNotFound(error) => { + debug!(%error, "value not found when querying for a dictionary seed URef"); + Err(ErrorCode::DictionaryURefNotFound) + } + QueryResult::Failure(error) => { + debug!(%error, "failed when querying for a dictionary seed URef"); + Err(ErrorCode::FailedQuery) + } } } @@ -637,8 +694,14 @@ where Ok(Some(GlobalStateQueryResult::new(*value, proofs))) } QueryResult::RootNotFound => Err(ErrorCode::RootNotFound), - QueryResult::ValueNotFound(_) => Err(ErrorCode::NotFound), - QueryResult::Failure(_) => Err(ErrorCode::FailedQuery), + QueryResult::ValueNotFound(error) => { + debug!(%error, "value not found when querying for a global state item"); + Err(ErrorCode::NotFound) + } + QueryResult::Failure(error) => { + debug!(%error, "failed when querying for a global state item"); + Err(ErrorCode::FailedQuery) + } } } @@ -786,6 +849,7 @@ where }; let status = NodeStatus { + protocol_version, peers: Peers::from(peers), build_version: crate::VERSION_STRING.clone(), chainspec_name: network_name.into(), @@ -816,6 +880,7 @@ where }; let Some(previous_height) = header.height().checked_sub(1) else { // there's not going to be any rewards for the genesis block + debug!("received a request for rewards in the genesis block"); return BinaryResponse::new_empty(protocol_version); }; let Some(parent_header) = effect_builder @@ -829,7 +894,7 @@ where }; let snapshot_request = SeigniorageRecipientsRequest::new( *parent_header.state_root_hash(), - protocol_version, + parent_header.protocol_version(), ); let snapshot = match effect_builder @@ -842,16 +907,25 @@ where SeigniorageRecipientsResult::RootNotFound => { return BinaryResponse::new_error(ErrorCode::RootNotFound, protocol_version) } - SeigniorageRecipientsResult::Failure(_) => { - return BinaryResponse::new_error(ErrorCode::FailedQuery, protocol_version) + SeigniorageRecipientsResult::Failure(error) => { + warn!(%error, "failed when querying for seigniorage recipients"); + return 
BinaryResponse::new_error(ErrorCode::FailedQuery, protocol_version); } - SeigniorageRecipientsResult::AuctionNotFound - | SeigniorageRecipientsResult::ValueNotFound(_) => { - return BinaryResponse::new_error(ErrorCode::InternalError, protocol_version) + SeigniorageRecipientsResult::AuctionNotFound => { + warn!("auction not found when querying for seigniorage recipients"); + return BinaryResponse::new_error(ErrorCode::InternalError, protocol_version); + } + SeigniorageRecipientsResult::ValueNotFound(error) => { + warn!(%error, "value not found when querying for seigniorage recipients"); + return BinaryResponse::new_error(ErrorCode::InternalError, protocol_version); } }; let Some(era_end) = header.clone_era_end() else { // switch block should have an era end + warn!( + hash = %header.block_hash(), + "era end not found in the switch block retrieved from storage" + ); return BinaryResponse::new_error(ErrorCode::InternalError, protocol_version); }; let block_rewards = match era_end.rewards() { @@ -866,21 +940,36 @@ where let Some(validator_rewards) = block_rewards.get(&validator) else { return BinaryResponse::new_empty(protocol_version); }; - match auction::reward( + + let seigniorage_recipient = snapshot + .get(&header.era_id()) + .and_then(|era| era.get(&validator)); + let reward = auction::reward( &validator, delegator.as_deref(), header.era_id(), validator_rewards, &snapshot, - ) { - Ok(Some(reward)) => { - let response = RewardResponse::new(reward, header.era_id()); + ); + match (reward, seigniorage_recipient) { + (Ok(Some(reward)), Some(seigniorage_recipient)) => { + let response = RewardResponse::new( + reward, + header.era_id(), + *seigniorage_recipient.delegation_rate(), + ); BinaryResponse::from_value(response, protocol_version) } - Ok(None) => BinaryResponse::new_empty(protocol_version), - Err(_) => BinaryResponse::new_error(ErrorCode::InternalError, protocol_version), + (Err(error), _) => { + warn!(%error, "failed when calculating rewards"); + BinaryResponse::new_error(ErrorCode::InternalError, protocol_version) + } + _ => BinaryResponse::new_empty(protocol_version), } } + InformationRequest::ProtocolVersion => { + BinaryResponse::from_value(protocol_version, protocol_version) + } } } @@ -923,8 +1012,9 @@ where .await; match result { - SpeculativeExecutionResult::InvalidTransaction(ite) => { - BinaryResponse::new_error(ite.into(), protocol_version) + SpeculativeExecutionResult::InvalidTransaction(error) => { + debug!(%error, "invalid transaction submitted for speculative execution"); + BinaryResponse::new_error(error.into(), protocol_version) } SpeculativeExecutionResult::WasmV1(spec_exec_result) => { BinaryResponse::from_value(spec_exec_result, protocol_version) @@ -989,11 +1079,13 @@ fn extract_header(payload: &[u8]) -> Result<(BinaryRequestHeader, &[u8]), ErrorC return Err(ErrorCode::BinaryProtocolVersionMismatch); } - let Ok((header, remainder)) = BinaryRequestHeader::from_bytes(payload) else { - return Err(ErrorCode::BadRequest); - }; - - Ok((header, remainder)) + match BinaryRequestHeader::from_bytes(payload) { + Ok((header, remainder)) => Ok((header, remainder)), + Err(error) => { + debug!(%error, "failed to parse binary request header"); + Err(ErrorCode::BadRequest) + } + } } async fn handle_payload( @@ -1037,7 +1129,7 @@ where let request = match BinaryRequest::try_from((tag, remainder)) { Ok(request) => request, Err(error) => { - warn!(?tag, ?error, "failed to parse request"); + debug!(%error, "failed to parse binary request body"); return ( 
BinaryResponse::new_error(ErrorCode::BadRequest, protocol_version), request_id, diff --git a/node/src/components/block_validator/state.rs b/node/src/components/block_validator/state.rs index 6da7ac54ad..951e2e919f 100644 --- a/node/src/components/block_validator/state.rs +++ b/node/src/components/block_validator/state.rs @@ -655,7 +655,7 @@ mod tests { // Please note: values in the following test cases must match the production chainspec. const MAX_LARGE_COUNT: u64 = 3; const MAX_AUCTION_COUNT: u64 = 145; - const MAX_INSTALL_UPGRADE_COUNT: u64 = 2; + const MAX_INSTALL_UPGRADE_COUNT: u64 = 1; const MAX_MINT_COUNT: u64 = 650; struct TestCase { @@ -712,6 +712,7 @@ mod tests { }, }; + #[allow(dead_code)] const LESS_THAN_MAX_INSTALL_UPGRADE: TestCase = TestCase { install_upgrade_count: FULL_INSTALL_UPGRADE.install_upgrade_count - 1, state_validator: |(state, responder)| { @@ -819,7 +820,8 @@ mod tests { let mut rng = TestRng::new(); run_test_case(TOO_MANY_INSTALL_UPGRADE, &mut rng); run_test_case(FULL_INSTALL_UPGRADE, &mut rng); - run_test_case(LESS_THAN_MAX_INSTALL_UPGRADE, &mut rng); + //TODO: Fix test setup so this isn't identical to the no transactions case + //run_test_case(LESS_THAN_MAX_INSTALL_UPGRADE, &mut rng); } #[test] @@ -919,7 +921,7 @@ mod tests { fn should_add_responder_if_in_progress() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); assert!(matches!(state, BlockValidationState::InProgress { .. })); assert_eq!(state.responder_count(), 1); @@ -960,7 +962,7 @@ mod tests { fn should_add_new_holder_if_in_progress() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); assert!(matches!(state, BlockValidationState::InProgress { .. })); assert_eq!(state.holders_mut().unwrap().len(), 1); @@ -977,7 +979,7 @@ mod tests { fn should_not_change_holder_state() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); assert!(matches!(state, BlockValidationState::InProgress { .. })); let (holder, holder_state) = state .holders_mut() @@ -1000,7 +1002,7 @@ mod tests { fn should_start_fetching() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); assert!(matches!(state, BlockValidationState::InProgress { .. })); let (holder, holder_state) = state .holders_mut() @@ -1028,7 +1030,7 @@ mod tests { .. } => { assert_eq!(holder, original_holder); - assert_eq!(missing_transactions.len(), 8); + assert_eq!(missing_transactions.len(), 7); } _ => panic!("unexpected return value"), } @@ -1042,7 +1044,7 @@ mod tests { fn start_fetching_should_return_ongoing_if_any_holder_in_asked_state() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); assert!(matches!(state, BlockValidationState::InProgress { .. })); // Change the current (only) holder's state to `Asked`. 
@@ -1087,7 +1089,7 @@ mod tests { fn start_fetching_should_return_unable_if_all_holders_in_failed_state() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); assert!(matches!(state, BlockValidationState::InProgress { .. })); // Set the original holder's state to `Failed` and add some more failed. @@ -1139,7 +1141,7 @@ mod tests { fn state_should_change_to_validation_succeeded() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 1); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); assert!(matches!(state, BlockValidationState::InProgress { .. })); // While there is still at least one missing transaction, `try_add_transaction_footprint` @@ -1168,7 +1170,7 @@ mod tests { fn unrelated_transaction_added_should_not_change_state() { let mut rng = TestRng::new(); let mut fixture = Fixture::new(&mut rng); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); let (appendable_block_before, missing_transactions_before, holders_before) = match &state { BlockValidationState::InProgress { appendable_block, @@ -1223,7 +1225,7 @@ mod tests { new_standard(fixture.rng, Timestamp::MAX, TimeDiff::from_seconds(1)); let invalid_transaction_hash = invalid_transaction.hash(); fixture.transactions.push(invalid_transaction.clone()); - let (mut state, _maybe_responder) = fixture.new_state(2, 2, 2, 2); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 2); assert!(matches!(state, BlockValidationState::InProgress { .. })); if let BlockValidationState::InProgress { ref mut missing_transactions, diff --git a/node/src/components/contract_runtime.rs b/node/src/components/contract_runtime.rs index 83e8486d9c..ec8e3894b2 100644 --- a/node/src/components/contract_runtime.rs +++ b/node/src/components/contract_runtime.rs @@ -30,9 +30,9 @@ use tracing::{debug, error, info, trace}; use casper_execution_engine::engine_state::{EngineConfigBuilder, ExecutionEngineV1}; use casper_storage::{ data_access_layer::{ - AddressableEntityRequest, BlockStore, DataAccessLayer, EntryPointsRequest, - ExecutionResultsChecksumRequest, FlushRequest, FlushResult, GenesisRequest, GenesisResult, - TrieRequest, + AddressableEntityRequest, AddressableEntityResult, BlockStore, DataAccessLayer, + EntryPointsRequest, ExecutionResultsChecksumRequest, FlushRequest, FlushResult, + GenesisRequest, GenesisResult, TrieRequest, }, global_state::{ state::{lmdb::LmdbGlobalState, CommitProvider, StateProvider}, @@ -43,7 +43,8 @@ use casper_storage::{ tracking_copy::TrackingCopyError, }; use casper_types::{ - ActivationPoint, Chainspec, ChainspecRawBytes, ChainspecRegistry, EraId, PublicKey, + account::AccountHash, ActivationPoint, Chainspec, ChainspecRawBytes, ChainspecRegistry, + EntityAddr, EraId, Key, PublicKey, }; use crate::{ @@ -399,7 +400,7 @@ impl ContractRuntime { } ContractRuntimeRequest::GetAddressableEntity { state_root_hash, - key, + entity_addr, responder, } => { trace!(?state_root_hash, "get addressable entity"); @@ -407,8 +408,29 @@ impl ContractRuntime { let data_access_layer = Arc::clone(&self.data_access_layer); async move { let start = Instant::now(); - let request = AddressableEntityRequest::new(state_root_hash, key); + let entity_key = match entity_addr { + EntityAddr::SmartContract(_) 
| EntityAddr::System(_) => Key::AddressableEntity(entity_addr), + EntityAddr::Account(account) => Key::Account(AccountHash::new(account)), + }; + let request = AddressableEntityRequest::new(state_root_hash, entity_key); let result = data_access_layer.addressable_entity(request); + let result = match &result { + AddressableEntityResult::ValueNotFound(msg) => { + if entity_addr.is_contract() { + trace!(%msg, "can not read addressable entity by Key::AddressableEntity or Key::Account, will try by Key::Hash"); + let entity_key = Key::Hash(entity_addr.value()); + let request = AddressableEntityRequest::new(state_root_hash, entity_key); + data_access_layer.addressable_entity(request) + } + else { + result + } + }, + AddressableEntityResult::RootNotFound | + AddressableEntityResult::Success { .. } | + AddressableEntityResult::Failure(_) => result, + }; + metrics .addressable_entity .observe(start.elapsed().as_secs_f64()); diff --git a/node/src/components/contract_runtime/rewards.rs b/node/src/components/contract_runtime/rewards.rs index 955dc4bcd9..956e18a972 100644 --- a/node/src/components/contract_runtime/rewards.rs +++ b/node/src/components/contract_runtime/rewards.rs @@ -15,6 +15,7 @@ use futures::stream::{self, StreamExt as _, TryStreamExt as _}; use itertools::Itertools; use num_rational::Ratio; use num_traits::{CheckedAdd, CheckedMul}; +use tracing::trace; use crate::{ effect::{ @@ -474,6 +475,12 @@ pub(crate) fn rewards_for_era( // Collect all rewards as a ratio: for block in rewards_info.blocks_from_era(current_era_id) { // Transfer the block production reward for this block proposer: + trace!( + proposer=?block.proposer, + amount=%production_reward.to_integer(), + block=%block.height, + "proposer reward" + ); increase_value_for_key_and_era(block.proposer.clone(), current_era_id, production_reward)?; // Now, let's compute the reward attached to each signed block reported by the block @@ -504,6 +511,21 @@ pub(crate) fn rewards_for_era( .checked_mul(&rewards_info.reward(signed_block_era)?) 
.ok_or(RewardsError::ArithmeticOverflow)?; + trace!( + signer=?signing_validator, + amount=%contribution_reward.to_integer(), + block=%block.height, + signed_block=%signed_block_height, + "signature contribution reward" + ); + trace!( + collector=?block.proposer, + signer=?signing_validator, + amount=%collection_reward.to_integer(), + block=%block.height, + signed_block=%signed_block_height, + "signature collection reward" + ); increase_value_for_key_and_era( signing_validator, signed_block_era, @@ -511,7 +533,7 @@ pub(crate) fn rewards_for_era( )?; increase_value_for_key_and_era( block.proposer.clone(), - signed_block_era, + current_era_id, collection_reward, )?; } diff --git a/node/src/components/contract_runtime/rewards/tests.rs b/node/src/components/contract_runtime/rewards/tests.rs index d0265cc869..59dfb54a2e 100644 --- a/node/src/components/contract_runtime/rewards/tests.rs +++ b/node/src/components/contract_runtime/rewards/tests.rs @@ -283,10 +283,8 @@ fn all_signatures_rewards_without_contribution_fee() { ratio(1) * ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion()) - }; - let validator_1_expected_payout_prev_era = { - // All finality signature collected: - ratio(era_1_reward_per_round) * ratio(core_config.collection_rewards_proportion()) + // All finality signature collected (paid out in era 2): + + ratio(era_1_reward_per_round) * ratio(core_config.collection_rewards_proportion()) }; let validator_2_expected_payout = { // 1 block produced: @@ -303,8 +301,7 @@ fn all_signatures_rewards_without_contribution_fee() { assert_eq!( map! { - VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer(), - validator_1_expected_payout_prev_era.to_integer()], + VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()], VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()], VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()], }, @@ -634,6 +631,10 @@ fn mixed_signatures_pattern() { + ratio(1) * constructor.weight(era, VALIDATOR_3.deref()) + ratio(1) * constructor.weight(era, VALIDATOR_4.deref()) ) * ratio(era_2_reward_per_round) + // collected one signature from era 1 + + ( + ratio(1) * constructor.weight(1, VALIDATOR_1.deref()) + ) * ratio(era_1_reward_per_round) } // Finality signed: + contribution * { @@ -641,12 +642,7 @@ fn mixed_signatures_pattern() { ratio(3) * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_3.deref()) }).to_integer(), // for era 1 - (collection * { - ( - ratio(1) * constructor.weight(1, VALIDATOR_1.deref()) - ) * ratio(era_1_reward_per_round) - } - + contribution * { + (contribution * { // 1 in previous era: ratio(1) * ratio(era_1_reward_per_round) * constructor.weight(1, VALIDATOR_3.deref()) }).to_integer() @@ -663,16 +659,14 @@ fn mixed_signatures_pattern() { + ratio(2) * constructor.weight(era, VALIDATOR_3.deref()) + ratio(1) * constructor.weight(era, VALIDATOR_4.deref()) ) * ratio(era_2_reward_per_round) + // collected one signature from era 1 + + ( + ratio(1) * constructor.weight(1, VALIDATOR_3.deref()) + ) * ratio(era_1_reward_per_round) } // 3 finality signed: + ratio(2) * contribution * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_4.deref())) .to_integer(), - // for era 1 - (collection * { - ( - ratio(1) * constructor.weight(1, VALIDATOR_3.deref()) - ) * ratio(era_1_reward_per_round) - }).to_integer() ]; assert_eq!( diff --git a/node/src/components/transaction_acceptor.rs b/node/src/components/transaction_acceptor.rs index 
f91d3b6bb7..4a38ef5b64 100644 --- a/node/src/components/transaction_acceptor.rs +++ b/node/src/components/transaction_acceptor.rs @@ -13,13 +13,7 @@ use tracing::{debug, error, trace}; use casper_execution_engine::engine_state::MAX_PAYMENT; use casper_storage::data_access_layer::{balance::BalanceHandling, BalanceRequest, ProofHandling}; use casper_types::{ - account::AccountHash, addressable_entity::AddressableEntity, contracts::ContractHash, - system::auction::ARG_AMOUNT, AddressableEntityHash, AddressableEntityIdentifier, BlockHeader, - Chainspec, EntityAddr, EntityKind, EntityVersion, EntityVersionKey, EntryPoint, EntryPointAddr, - ExecutableDeployItem, ExecutableDeployItemIdentifier, InitiatorAddr, Key, Package, PackageAddr, - PackageHash, PackageIdentifier, Transaction, TransactionEntryPoint, - TransactionInvocationTarget, TransactionRuntime, TransactionTarget, DEFAULT_ENTRY_POINT_NAME, - U512, + account::AccountHash, addressable_entity::AddressableEntity, system::auction::ARG_AMOUNT, AddressableEntityHash, AddressableEntityIdentifier, BlockHeader, Chainspec, EntityAddr, EntityKind, EntityVersion, EntityVersionKey, EntryPoint, EntryPointAddr, ExecutableDeployItem, ExecutableDeployItemIdentifier, InitiatorAddr, Key, Package, PackageAddr, PackageHash, PackageIdentifier, Transaction, TransactionEntryPoint, TransactionInvocationTarget, TransactionRuntime, TransactionTarget, DEFAULT_ENTRY_POINT_NAME, U512 }; use crate::{ @@ -178,12 +172,13 @@ impl TransactionAcceptor { }; if event_metadata.source.is_client() { - let account_key = match event_metadata.transaction.initiator_addr() { - InitiatorAddr::PublicKey(public_key) => Key::from(public_key.to_account_hash()), - InitiatorAddr::AccountHash(account_hash) => Key::from(account_hash), + let account_hash = match event_metadata.transaction.initiator_addr() { + InitiatorAddr::PublicKey(public_key) => public_key.to_account_hash(), + InitiatorAddr::AccountHash(account_hash) => account_hash, }; + let entity_addr = EntityAddr::Account(account_hash.value()); effect_builder - .get_addressable_entity(*block_header.state_root_hash(), account_key) + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) .event(move |result| Event::GetAddressableEntityResult { event_metadata, maybe_entity: result.into_option(), @@ -311,32 +306,26 @@ impl TransactionAcceptor { ExecutableDeployItemIdentifier::AddressableEntity( AddressableEntityIdentifier::Hash(contract_hash), ) => { - let query_key = Key::from(ContractHash::new(contract_hash.value())); + let entity_addr = EntityAddr::SmartContract(contract_hash.value()); effect_builder - .get_addressable_entity(*block_header.state_root_hash(), query_key) - .event(move |result| { - debug!(?result, "get_addressable_entity result 4"); - Event::GetContractResult { - event_metadata, - block_header, - is_payment: true, - contract_hash, - maybe_entity: result.into_option(), - } - }) - } - ExecutableDeployItemIdentifier::AddressableEntity( - AddressableEntityIdentifier::Addr(entity_addr), - ) => { - let query_key = Key::AddressableEntity(entity_addr); - effect_builder - .get_addressable_entity(*block_header.state_root_hash(), query_key) - .event(move |result| Event::GetAddressableEntityResult { + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { event_metadata, block_header, + is_payment: true, + contract_hash, maybe_entity: result.into_option(), }) } + ExecutableDeployItemIdentifier::AddressableEntity( + 
AddressableEntityIdentifier::Addr(entity_addr), + ) => effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetAddressableEntityResult { + event_metadata, + block_header, + maybe_entity: result.into_option(), + }), ExecutableDeployItemIdentifier::Package( ref contract_package_identifier @ PackageIdentifier::Hash { package_hash, .. }, ) => { @@ -433,32 +422,26 @@ impl TransactionAcceptor { ExecutableDeployItemIdentifier::AddressableEntity( AddressableEntityIdentifier::Hash(entity_hash), ) => { - let key = Key::from(ContractHash::new(entity_hash.value())); + let entity_addr = EntityAddr::SmartContract(entity_hash.value()); effect_builder - .get_addressable_entity(*block_header.state_root_hash(), key) - .event(move |result| { - debug!(?result, "get_addressable_entity result 3"); - Event::GetContractResult { - event_metadata, - block_header, - is_payment: false, - contract_hash: entity_hash, - maybe_entity: result.into_option(), - } - }) - } - ExecutableDeployItemIdentifier::AddressableEntity( - AddressableEntityIdentifier::Addr(entity_addr), - ) => { - let key = Key::AddressableEntity(entity_addr); - effect_builder - .get_addressable_entity(*block_header.state_root_hash(), key) - .event(move |result| Event::GetAddressableEntityResult { + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { event_metadata, block_header, + is_payment: false, + contract_hash: entity_hash, maybe_entity: result.into_option(), }) } + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Addr(entity_addr), + ) => effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetAddressableEntityResult { + event_metadata, + block_header, + maybe_entity: result.into_option(), + }), ExecutableDeployItemIdentifier::Package( ref package_identifier @ PackageIdentifier::Hash { package_hash, .. }, ) => { @@ -525,22 +508,14 @@ impl TransactionAcceptor { NextStep::GetContract(entity_addr, runtime) => { // Use `Key::Hash` variant so that we try to retrieve the entity as either an // AddressableEntity, or fall back to retrieving an un-migrated Contract. 
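+                // (The Key::Hash fallback now lives in the contract runtime's GetAddressableEntity
+                // handler, which retries the lookup with Key::Hash when the EntityAddr-based read of
+                // a contract reports ValueNotFound, so only the EntityAddr is passed from here.)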
- let key = match runtime { - TransactionRuntime::VmCasperV1 => Key::Hash(entity_addr.value()), // Is that - TransactionRuntime::VmCasperV2 => Key::AddressableEntity(entity_addr), - }; - effect_builder - .get_addressable_entity(*block_header.state_root_hash(), key) - .event(move |result| { - debug!(?result, ?key, ?runtime, "get_addressable_entity result 2"); - Event::GetContractResult { - event_metadata, - block_header, - is_payment: false, - contract_hash: AddressableEntityHash::new(entity_addr.value()), - maybe_entity: result.into_option(), - } + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { + event_metadata, + block_header, + is_payment: false, + contract_hash: AddressableEntityHash::new(entity_addr.value()), + maybe_entity: result.into_option(), }) } NextStep::GetPackage(package_addr, maybe_package_version) => { @@ -744,18 +719,15 @@ impl TransactionAcceptor { match package.lookup_entity_hash(entity_version_key) { Some(&contract_hash) => { - let key = Key::from(ContractHash::new(contract_hash.value())); + let entity_addr = EntityAddr::SmartContract(contract_hash.value()); effect_builder - .get_addressable_entity(*block_header.state_root_hash(), key) - .event(move |result| { - debug!(?result, "get_addressable_entity result 1"); - Event::GetContractResult { - event_metadata, - block_header, - is_payment, - contract_hash, - maybe_entity: result.into_option(), - } + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { + event_metadata, + block_header, + is_payment, + contract_hash, + maybe_entity: result.into_option(), }) } None => { diff --git a/node/src/components/transaction_acceptor/error.rs b/node/src/components/transaction_acceptor/error.rs index 68f431ccf6..7f9cbecaa3 100644 --- a/node/src/components/transaction_acceptor/error.rs +++ b/node/src/components/transaction_acceptor/error.rs @@ -68,13 +68,58 @@ impl Error { impl From for BinaryPortErrorCode { fn from(err: Error) -> Self { match err { - Error::EmptyBlockchain - | Error::Parameters { .. } - | Error::Expired { .. } - | Error::ExpectedDeploy - | Error::ExpectedTransactionV1 => { - BinaryPortErrorCode::InvalidTransactionOrDeployUnspecified - } + Error::EmptyBlockchain => BinaryPortErrorCode::EmptyBlockchain, + Error::ExpectedDeploy => BinaryPortErrorCode::ExpectedDeploy, + Error::ExpectedTransactionV1 => BinaryPortErrorCode::ExpectedTransaction, + Error::Expired { .. } => BinaryPortErrorCode::TransactionExpired, + Error::Parameters { failure, .. } => match failure { + ParameterFailure::NoSuchAddressableEntity { .. } => { + BinaryPortErrorCode::NoSuchAddressableEntity + } + ParameterFailure::NoSuchContractAtHash { .. } => { + BinaryPortErrorCode::NoSuchContractAtHash + } + ParameterFailure::NoSuchEntryPoint { .. } => BinaryPortErrorCode::NoSuchEntryPoint, + ParameterFailure::NoSuchPackageAtHash { .. } => { + BinaryPortErrorCode::NoSuchPackageAtHash + } + ParameterFailure::InvalidEntityAtVersion { .. } => { + BinaryPortErrorCode::InvalidEntityAtVersion + } + ParameterFailure::DisabledEntityAtVersion { .. } => { + BinaryPortErrorCode::DisabledEntityAtVersion + } + ParameterFailure::MissingEntityAtVersion { .. 
} => { + BinaryPortErrorCode::MissingEntityAtVersion + } + ParameterFailure::InvalidAssociatedKeys => { + BinaryPortErrorCode::InvalidAssociatedKeys + } + ParameterFailure::InsufficientSignatureWeight => { + BinaryPortErrorCode::InsufficientSignatureWeight + } + ParameterFailure::InsufficientBalance { .. } => { + BinaryPortErrorCode::InsufficientBalance + } + ParameterFailure::UnknownBalance { .. } => BinaryPortErrorCode::UnknownBalance, + ParameterFailure::Deploy(deploy_failure) => match deploy_failure { + DeployParameterFailure::InvalidPaymentVariant => { + BinaryPortErrorCode::DeployInvalidPaymentVariant + } + DeployParameterFailure::MissingPaymentAmount => { + BinaryPortErrorCode::DeployMissingPaymentAmount + } + DeployParameterFailure::FailedToParsePaymentAmount => { + BinaryPortErrorCode::DeployFailedToParsePaymentAmount + } + DeployParameterFailure::MissingTransferTarget => { + BinaryPortErrorCode::DeployMissingTransferTarget + } + DeployParameterFailure::MissingModuleBytes => { + BinaryPortErrorCode::DeployMissingModuleBytes + } + }, + }, Error::InvalidTransaction(invalid_transaction) => { BinaryPortErrorCode::from(invalid_transaction) } diff --git a/node/src/components/transaction_acceptor/tests.rs b/node/src/components/transaction_acceptor/tests.rs index 74c21cda4f..0d2081f16a 100644 --- a/node/src/components/transaction_acceptor/tests.rs +++ b/node/src/components/transaction_acceptor/tests.rs @@ -841,7 +841,7 @@ impl reactor::Reactor for Reactor { } ContractRuntimeRequest::GetAddressableEntity { state_root_hash: _, - key, + entity_addr, responder, } => { let result = if matches!( @@ -852,12 +852,13 @@ impl reactor::Reactor for Reactor { TestScenario::FromPeerMissingAccount(_) ) { AddressableEntityResult::ValueNotFound("missing account".to_string()) - } else if let Key::Account(account_hash) = key { - let account = create_account(account_hash, self.test_scenario); + } else if let EntityAddr::Account(account_hash) = entity_addr { + let account = + create_account(AccountHash::new(account_hash), self.test_scenario); AddressableEntityResult::Success { entity: AddressableEntity::from(account), } - } else if let Key::Hash(..) = key { + } else if let EntityAddr::SmartContract(..) 
= entity_addr { match self.test_scenario { TestScenario::FromPeerCustomPaymentContract( ContractScenario::MissingContractAtHash, @@ -894,10 +895,12 @@ impl reactor::Reactor for Reactor { entity: AddressableEntity::from(contract), } } - _ => panic!("unexpected GetAddressableEntity: {:?}", key), + _ => panic!("unexpected GetAddressableEntity: {:?}", entity_addr), } } else { - panic!("should GetAddressableEntity using Key's Account or Hash variant"); + panic!( + "should GetAddressableEntity using Account or SmartContract variant" + ); }; responder.respond(result).ignore() } diff --git a/node/src/effect.rs b/node/src/effect.rs index 330c7d82a2..55e801c460 100644 --- a/node/src/effect.rs +++ b/node/src/effect.rs @@ -132,9 +132,9 @@ use casper_storage::{ use casper_types::{ execution::{Effects as ExecutionEffects, ExecutionResult}, Approval, AvailableBlockRange, Block, BlockHash, BlockHeader, BlockSignatures, - BlockSynchronizerStatus, BlockV2, ChainspecRawBytes, DeployHash, Digest, EraId, ExecutionInfo, - FinalitySignature, FinalitySignatureId, FinalitySignatureV2, Key, NextUpgrade, Package, - ProtocolUpgradeConfig, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, + BlockSynchronizerStatus, BlockV2, ChainspecRawBytes, DeployHash, Digest, EntityAddr, EraId, + ExecutionInfo, FinalitySignature, FinalitySignatureId, FinalitySignatureV2, Key, NextUpgrade, + Package, ProtocolUpgradeConfig, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, TransactionHash, TransactionHeader, TransactionId, Transfer, U512, }; @@ -2050,11 +2050,12 @@ impl EffectBuilder { .await } - /// Retrieves an `AddressableEntity` from under the given key in global state if present. + /// Retrieves an `AddressableEntity` from under the given entity address (or key, if the former + /// is not found) in global state. pub(crate) async fn get_addressable_entity( self, state_root_hash: Digest, - key: Key, + entity_addr: EntityAddr, ) -> AddressableEntityResult where REv: From, @@ -2062,7 +2063,7 @@ impl EffectBuilder { self.make_request( |responder| ContractRuntimeRequest::GetAddressableEntity { state_root_hash, - key, + entity_addr, responder, }, QueueKind::ContractRuntime, diff --git a/node/src/effect/requests.rs b/node/src/effect/requests.rs index f65f1ab407..c5f89403b2 100644 --- a/node/src/effect/requests.rs +++ b/node/src/effect/requests.rs @@ -33,9 +33,9 @@ use casper_storage::{ use casper_types::{ execution::ExecutionResult, Approval, AvailableBlockRange, Block, BlockHash, BlockHeader, BlockSignatures, BlockSynchronizerStatus, BlockV2, ChainspecRawBytes, DeployHash, Digest, - DisplayIter, EraId, ExecutionInfo, FinalitySignature, FinalitySignatureId, Key, NextUpgrade, - ProtocolUpgradeConfig, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, - TransactionHash, TransactionHeader, TransactionId, Transfer, + DisplayIter, EntityAddr, EraId, ExecutionInfo, FinalitySignature, FinalitySignatureId, Key, + NextUpgrade, ProtocolUpgradeConfig, ProtocolVersion, PublicKey, TimeDiff, Timestamp, + Transaction, TransactionHash, TransactionHeader, TransactionId, Transfer, }; use super::{AutoClosingResponder, GossipTarget, Responder}; @@ -822,13 +822,13 @@ pub(crate) enum ContractRuntimeRequest { state_root_hash: Digest, responder: Responder, }, - /// Returns an `AddressableEntity` if found under the given key. If a legacy `Account` + /// Returns an `AddressableEntity` if found under the given entity_addr. 
If a legacy `Account` /// or contract exists under the given key, it will be migrated to an `AddressableEntity` /// and returned. However, global state is not altered and the migrated record does not /// actually exist. GetAddressableEntity { state_root_hash: Digest, - key: Key, + entity_addr: EntityAddr, responder: Responder, }, /// Returns a singular entry point based under the given state root hash and entry @@ -925,13 +925,13 @@ impl Display for ContractRuntimeRequest { ), ContractRuntimeRequest::GetAddressableEntity { state_root_hash, - key, + entity_addr, .. } => { write!( formatter, "get addressable_entity {} under {}", - key, state_root_hash + entity_addr, state_root_hash ) } ContractRuntimeRequest::GetTrie { request, .. } => { diff --git a/node/src/reactor/main_reactor/tests.rs b/node/src/reactor/main_reactor/tests.rs index 96657c0219..3bf9e94142 100644 --- a/node/src/reactor/main_reactor/tests.rs +++ b/node/src/reactor/main_reactor/tests.rs @@ -464,6 +464,24 @@ impl TestFixture { .expect("node 0 should have a complete block") } + /// Get block by height + fn get_block_by_height(&self, block_height: u64) -> Block { + let node_0 = self + .node_contexts + .first() + .expect("should have at least one node") + .id; + + self.network + .nodes() + .get(&node_0) + .expect("should have node 0") + .main_reactor() + .storage() + .read_block_by_height(block_height) + .expect("failure to read block at height") + } + #[track_caller] fn get_block_gas_price_by_public_key(&self, maybe_public_key: Option<&PublicKey>) -> u8 { let node_id = match maybe_public_key { @@ -1841,6 +1859,89 @@ async fn run_withdraw_bid_network() { fixture.check_bid_existence_at_tip(&alice_public_key, None, false); } +#[tokio::test] +async fn node_should_rejoin_after_ejection() { + let initial_stakes = InitialStakes::AllEqual { + count: 5, + stake: 1_000_000_000, + }; + let minimum_era_height = 4; + let configs_override = ConfigsOverride { + minimum_era_height, + minimum_block_time: "4096 ms".parse().unwrap(), + round_seigniorage_rate: Ratio::new(1, 1_000_000_000_000), + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(configs_override)).await; + + // Run through the first era. + fixture + .run_until_block_height(minimum_era_height, ONE_MIN) + .await; + + let stopped_node = fixture.remove_and_stop_node(1); + let stopped_secret_key = Arc::clone(&stopped_node.secret_key); + let stopped_public_key = PublicKey::from(&*stopped_secret_key); + + // Wait until the stopped node is ejected and removed from the validators set. + fixture + .run_until_consensus_in_era( + (fixture.chainspec.core_config.auction_delay + 3).into(), + ONE_MIN, + ) + .await; + + // Restart the node. + // Use the hash of the current highest complete block as the trusted hash. + let mut config = stopped_node.config; + config.node.trusted_hash = Some(*fixture.highest_complete_block().hash()); + fixture + .add_node(stopped_node.secret_key, config, stopped_node.storage_dir) + .await; + + // Create & sign deploy to reactivate the stopped node's bid. + // The bid amount will make sure that the rejoining validator proposes soon after it rejoins. 
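+    // (A bid of 1_000_000_000_000_000_000 motes dwarfs the 1_000_000_000 initial stake of each
+    // remaining validator, so the rejoined node should hold nearly all of the weight once the
+    // auction delay has elapsed.)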
+ let mut deploy = Deploy::add_bid( + fixture.chainspec.network_config.name.clone(), + fixture.system_contract_hash(AUCTION), + stopped_public_key.clone(), + 1_000_000_000_000_000_000_u64.into(), + 10, + Timestamp::now(), + TimeDiff::from_seconds(60), + ); + deploy.sign(&stopped_secret_key); + let txn = Transaction::Deploy(deploy); + let txn_hash = txn.hash(); + + // Inject the transaction and run the network until executed. + fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + // Ensure execution succeeded and that there is a Write transform for the bid's key. + let bid_key = Key::BidAddr(BidAddr::from(stopped_public_key.clone())); + fixture + .successful_execution_transforms(&txn_hash) + .iter() + .find(|transform| match transform.kind() { + TransformKindV2::Write(StoredValue::BidKind(bid_kind)) => { + Key::from(bid_kind.bid_addr()) == bid_key + } + _ => false, + }) + .expect("should have a write record for bid"); + + // Wait until the auction delay passes, plus one era for a margin of error. + fixture + .run_until_consensus_in_era( + (2 * fixture.chainspec.core_config.auction_delay + 6).into(), + ONE_MIN, + ) + .await; +} + #[tokio::test] async fn run_undelegate_bid_network() { let alice_stake = 200_000_000_000_u64; @@ -2389,9 +2490,11 @@ async fn run_rewards_network_scenario( .expect("expected current era validator"), total_previous_era_weights, ); + // collection always goes to the era in which the block citing the + // reward was created add_to_rewards( proposer.clone(), - switch_blocks.headers[i - 1].era_id(), + block.era_id(), fixture.chainspec.core_config.finders_fee.into_u512() * contributor_proportion * previous_signatures_reward, diff --git a/node/src/reactor/main_reactor/tests/binary_port.rs b/node/src/reactor/main_reactor/tests/binary_port.rs index f8ff03c466..f67e98babe 100644 --- a/node/src/reactor/main_reactor/tests/binary_port.rs +++ b/node/src/reactor/main_reactor/tests/binary_port.rs @@ -50,6 +50,7 @@ const MESSAGE_SIZE: u32 = 1024 * 1024 * 10; struct TestData { rng: TestRng, + protocol_version: ProtocolVersion, chainspec_raw_bytes: ChainspecRawBytes, highest_block: Block, secret_signing_key: Arc, @@ -149,6 +150,7 @@ async fn setup() -> ( .bind_address() .expect("should be bound"); + let protocol_version = first_node.main_reactor().chainspec.protocol_version(); // We let the entire network run in the background, until our request completes. 
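    // (The protocol version captured above is carried in `TestData` so that the `node_status()`
    // and `get_protocol_version()` cases below can assert the node reports the chainspec's version.)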
let finish_cranking = fixture.run_until_stopped(rng.create_child()); @@ -164,6 +166,7 @@ async fn setup() -> ( finish_cranking, TestData { rng, + protocol_version, chainspec_raw_bytes, highest_block, secret_signing_key, @@ -298,6 +301,7 @@ async fn binary_port_component_handles_all_requests() { finish_cranking, TestData { mut rng, + protocol_version, chainspec_raw_bytes: network_chainspec_raw_bytes, highest_block, secret_signing_key, @@ -325,7 +329,7 @@ async fn binary_port_component_handles_all_requests() { consensus_status(), chainspec_raw_bytes(network_chainspec_raw_bytes), latest_switch_block_header(), - node_status(), + node_status(protocol_version), get_block_header(highest_block.clone_header()), get_block_transfers(highest_block.clone_header()), get_era_summary(state_root_hash), @@ -365,12 +369,11 @@ async fn binary_port_component_handles_all_requests() { None, ), get_reward( - Some(EraIdentifier::Block(BlockIdentifier::Hash( - *highest_block.hash(), - ))), + Some(EraIdentifier::Block(BlockIdentifier::Height(1))), era_one_validator, None, ), + get_protocol_version(protocol_version), ]; for ( @@ -656,7 +659,7 @@ fn latest_switch_block_header() -> TestCase { } } -fn node_status() -> TestCase { +fn node_status(expected_version: ProtocolVersion) -> TestCase { TestCase { name: "node_status", request: BinaryRequest::Get(GetRequest::Information { @@ -668,7 +671,8 @@ fn node_status() -> TestCase { response, Some(PayloadType::NodeStatus), |node_status| { - !node_status.peers.into_inner().is_empty() + node_status.protocol_version == expected_version + && !node_status.peers.into_inner().is_empty() && node_status.chainspec_name == "casper-example" && node_status.last_added_block_info.is_some() && node_status.our_public_signing_key.is_some() @@ -932,12 +936,32 @@ fn get_reward( }), asserter: Box::new(move |response| { assert_response::(response, Some(PayloadType::Reward), |reward| { - reward.amount() > U512::zero() + // test fixture sets delegation rate to 0 + reward.amount() > U512::zero() && reward.delegation_rate() == 0 }) }), } } +fn get_protocol_version(expected: ProtocolVersion) -> TestCase { + let key = InformationRequest::ProtocolVersion; + + TestCase { + name: "get_protocol_version", + request: BinaryRequest::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: vec![], + }), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(PayloadType::ProtocolVersion), + |version| expected == version, + ) + }), + } +} + fn try_accept_transaction(key: &SecretKey) -> TestCase { let transaction = Transaction::V1( TransactionV1Builder::new_targeting_invocable_entity_via_alias("Test", "call") diff --git a/node/src/reactor/main_reactor/tests/transactions.rs b/node/src/reactor/main_reactor/tests/transactions.rs index 57a2711998..bddad6eeef 100644 --- a/node/src/reactor/main_reactor/tests/transactions.rs +++ b/node/src/reactor/main_reactor/tests/transactions.rs @@ -277,25 +277,37 @@ fn get_entity_named_key( state_root_hash: Digest, entity_addr: EntityAddr, named_key: &str, -) -> Key { +) -> Option { + let key = Key::NamedKey( + NamedKeyAddr::new_from_string(entity_addr, named_key.to_owned()) + .expect("should be valid NamedKeyAddr"), + ); + + match query_global_state(fixture, state_root_hash, key) { + Some(val) => match &*val { + StoredValue::NamedKey(named_key) => { + Some(named_key.get_key().expect("should have a Key")) + } + value => panic!("Expected NamedKey but got {:?}", value), + }, + None => None, + } +} + +fn query_global_state( + fixture: &mut 
TestFixture, + state_root_hash: Digest, + key: Key, +) -> Option> { let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); match runner .main_reactor() .contract_runtime() .data_access_layer() - .query(QueryRequest::new( - state_root_hash, - Key::NamedKey( - NamedKeyAddr::new_from_string(entity_addr, named_key.to_owned()) - .expect("should be valid NamedKeyAddr"), - ), - vec![], - )) { - QueryResult::Success { value, .. } => match &*value { - StoredValue::NamedKey(named_key) => named_key.get_key().expect("should have a Key"), - value => panic!("Expected NamedKey but got {:?}", value), - }, - err => panic!("Expected QueryResult::Success but got {:?}", err), + .query(QueryRequest::new(state_root_hash, key, vec![])) + { + QueryResult::Success { value, .. } => Some(value), + _err => None, } } @@ -311,7 +323,7 @@ fn get_entity_by_account_hash( .data_access_layer() .addressable_entity(AddressableEntityRequest::new( state_root_hash, - Key::Account(account_hash), + Key::AddressableEntity(EntityAddr::Account(account_hash.value())), )) .into_option() .unwrap_or_else(|| { @@ -3053,7 +3065,8 @@ async fn insufficient_funds_transfer_from_purse() { state_root_hash, BOB_PUBLIC_KEY.to_account_hash(), ); - let key = get_entity_named_key(&mut test.fixture, state_root_hash, entity_addr, purse_name); + let key = get_entity_named_key(&mut test.fixture, state_root_hash, entity_addr, purse_name) + .expect("expected a key"); let uref = *key.as_uref().expect("Expected a URef"); // now we try to transfer from the purse we just created @@ -3371,7 +3384,8 @@ async fn successful_purse_to_purse_transfer() { BOB_PUBLIC_KEY.to_account_hash(), ); let bob_purse_key = - get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name); + get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name) + .expect("expected a key"); let bob_purse = *bob_purse_key.as_uref().expect("Expected a URef"); let alice_addr = get_entity_addr_from_account_hash( @@ -3463,7 +3477,8 @@ async fn successful_purse_to_account_transfer() { BOB_PUBLIC_KEY.to_account_hash(), ); let bob_purse_key = - get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name); + get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name) + .expect("expected a key"); let bob_purse = *bob_purse_key.as_uref().expect("Expected a URef"); // now we try to transfer from the purse we just created @@ -3571,3 +3586,67 @@ async fn native_transfer_deploy_without_source_purse_should_succeed() { let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); } + +#[tokio::test] +async fn out_of_gas_txn_does_not_produce_effects() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + // This WASM creates named key called "new_key". Then it would loop endlessly trying to write a + // value to storage. Eventually it will run out of gas and it should exit causing a revert. 
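+    // (The assertions at the end of this test depend on that revert: the "new_key" named key must
+    // be absent from global state and no effects of the looping writes may be committed.)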
+ let revert_contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("endless_loop_with_effects.wasm"); + let module_bytes = + Bytes::from(std::fs::read(revert_contract).expect("cannot read module bytes")); + + let mut txn = Transaction::from( + TransactionV1Builder::new_session(TransactionCategory::Large, module_bytes) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!( + matches!( + &exec_result, + ExecutionResult::V2(res) if res.error_message.as_deref() == Some("Out of gas error") + ), + "{:?}", + exec_result + ); + + let state_root_hash = *test + .fixture + .get_block_by_height(block_height) + .state_root_hash(); + let bob_addr = get_entity_addr_from_account_hash( + &mut test.fixture, + state_root_hash, + BOB_PUBLIC_KEY.to_account_hash(), + ); + + // Named key should not exist since the execution was reverted because it was out of gas. + assert!( + get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, "new_key").is_none() + ); +} diff --git a/resources/local/chainspec.toml.in b/resources/local/chainspec.toml.in index e8c15a90e8..6fedb404c1 100644 --- a/resources/local/chainspec.toml.in +++ b/resources/local/chainspec.toml.in @@ -170,7 +170,7 @@ block_max_approval_count = 2600 # Maximum block size in bytes including transactions contained by the block. 0 means unlimited. max_block_size = 10_485_760 # The upper limit of total gas of all transactions in a block. -block_gas_limit = 10_000_000_000_000 +block_gas_limit = 3_300_000_000_000 # The minimum amount in motes for a valid native transfer. native_transfer_minimum_motes = 2_500_000_000 # The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file. @@ -195,7 +195,7 @@ runtime = "VmCasperV1" # [4] -> The maximum number of transactions the lane can contain native_mint_lane = [0, 1024, 1024, 65_000_000_000, 650] native_auction_lane = [1, 2048, 2048, 362_500_000_000, 145] -wasm_lanes = [[2, 1_048_576, 2048, 1_000_000_000_000, 2], [3, 262_144, 1024, 100_000_000_000, 3], [4, 131_072, 1024, 50_000_000_000, 5], [5, 8_192, 512, 1_500_000_000, 15]] +wasm_lanes = [[2, 1_048_576, 2048, 1_000_000_000_000, 1], [3, 344_064, 1024, 500_000_000_000, 3], [4, 172_032, 1024, 50_000_000_000, 7], [5, 12_288, 512, 1_500_000_000, 15]] [transactions.deploy] # The maximum number of Motes allowed to be spent during payment. 0 means unlimited. diff --git a/resources/production/chainspec.toml b/resources/production/chainspec.toml index 8f0d63fa7b..898abddaeb 100644 --- a/resources/production/chainspec.toml +++ b/resources/production/chainspec.toml @@ -180,7 +180,7 @@ block_max_approval_count = 2600 # Maximum block size in bytes including transactions contained by the block. 0 means unlimited. max_block_size = 5_242_880 # The upper limit of total gas of all transactions in a block. -block_gas_limit = 3_000_000_000_000 +block_gas_limit = 3_300_000_000_000 # The minimum amount in motes for a valid native transfer. native_transfer_minimum_motes = 2_500_000_000 # The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file. 
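# (As in the local chainspec above, the hunk below reduces the large-wasm lane's transaction count
# ([4]) to 1, so at most one such transaction can be included per block.)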
@@ -205,7 +205,7 @@ runtime = "VmCasperV1" # [4] -> The maximum number of transactions the lane can contain native_mint_lane = [0, 1024, 1024, 65_000_000_000, 650] native_auction_lane = [1, 2048, 2048, 362_500_000_000, 145] -wasm_lanes = [[2, 1_048_576, 2048, 1_000_000_000_000, 2], [3, 262_144, 1024, 100_000_000_000, 3], [4, 131_072, 1024, 50_000_000_000, 5], [5, 8_192, 512, 1_500_000_000, 15]] +wasm_lanes = [[2, 1_048_576, 2048, 1_000_000_000_000, 1], [3, 344_064, 1024, 500_000_000_000, 3], [4, 172_032, 1024, 50_000_000_000, 7], [5, 12_288, 512, 1_500_000_000, 15]] [transactions.deploy] # The maximum number of Motes allowed to be spent during payment. 0 means unlimited. diff --git a/smart_contracts/contracts/test/endless-loop-with-effects/Cargo.toml b/smart_contracts/contracts/test/endless-loop-with-effects/Cargo.toml new file mode 100644 index 0000000000..be5fe4a9b8 --- /dev/null +++ b/smart_contracts/contracts/test/endless-loop-with-effects/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "endless-loop-with-effects" +version = "0.1.0" +authors = ["Alex Sardan "] +edition = "2021" + +[[bin]] +name = "endless_loop_with_effects" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/endless-loop-with-effects/src/main.rs b/smart_contracts/contracts/test/endless-loop-with-effects/src/main.rs new file mode 100644 index 0000000000..a09765d17a --- /dev/null +++ b/smart_contracts/contracts/test/endless-loop-with-effects/src/main.rs @@ -0,0 +1,19 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::contract_api::{account, runtime, storage}; +use casper_types::Key; + +#[no_mangle] +pub extern "C" fn call() { + let mut data: u32 = 1; + let uref = storage::new_uref(data); + runtime::put_key("new_key", Key::from(uref)); + loop { + let _ = account::get_main_purse(); + data += 1; + storage::write(uref, data); + } +} diff --git a/smart_contracts/contracts/test/gh-4771-regression/Cargo.toml b/smart_contracts/contracts/test/gh-4771-regression/Cargo.toml new file mode 100644 index 0000000000..1d2016660b --- /dev/null +++ b/smart_contracts/contracts/test/gh-4771-regression/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "gh-4771-regression" +version = "0.1.0" +authors = ["RafaƂ Chabowski "] +edition = "2021" + +[[bin]] +name = "gh_4771_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-4771-regression/src/main.rs b/smart_contracts/contracts/test/gh-4771-regression/src/main.rs new file mode 100644 index 0000000000..f8af7bb361 --- /dev/null +++ b/smart_contracts/contracts/test/gh-4771-regression/src/main.rs @@ -0,0 +1,45 @@ +#![no_main] +#![no_std] + +extern crate alloc; + +use alloc::string::ToString; +use casper_contract::contract_api::{runtime, storage}; +use casper_types::{ + addressable_entity::Parameters, CLType, EntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, +}; + +const METHOD_TEST_ENTRY_POINT: &str = "test_entry_point"; +const NEW_KEY_NAME: &str = "Hello"; +const NEW_KEY_VALUE: &str = "World"; +const CONTRACT_PACKAGE_KEY: &str = "contract_package"; +const CONTRACT_HASH_KEY: &str = "contract_hash"; + +#[no_mangle] +fn test_entry_point() { + let value = storage::new_uref(NEW_KEY_VALUE); + 
runtime::put_key(NEW_KEY_NAME, value.into()); +} + +#[no_mangle] +fn call() { + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntryPoint::new( + METHOD_TEST_ENTRY_POINT, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let (contract_hash, _version) = storage::new_contract( + entry_points, + None, + Some(CONTRACT_PACKAGE_KEY.to_string()), + None, + None, + ); + runtime::put_key(CONTRACT_HASH_KEY, Key::contract_entity_key(contract_hash)); +} diff --git a/storage/src/global_state/state/lmdb.rs b/storage/src/global_state/state/lmdb.rs index f7936ce4a0..96e33e2a88 100644 --- a/storage/src/global_state/state/lmdb.rs +++ b/storage/src/global_state/state/lmdb.rs @@ -1,5 +1,5 @@ use itertools::Itertools; -use std::{collections::BTreeMap, ops::Deref, sync::Arc}; +use std::{ops::Deref, sync::Arc}; use lmdb::{DatabaseFlags, RwTransaction}; @@ -118,7 +118,7 @@ impl LmdbGlobalState { pub fn put_stored_values( &self, prestate_hash: Digest, - stored_values: BTreeMap, + stored_values: Vec<(Key, StoredValue)>, ) -> Result { let scratch_trie = self.get_scratch_store(); let new_state_root = put_stored_values::<_, _, GlobalStateError>( diff --git a/storage/src/global_state/state/mod.rs b/storage/src/global_state/state/mod.rs index 7e0eef3721..b0684491c2 100644 --- a/storage/src/global_state/state/mod.rs +++ b/storage/src/global_state/state/mod.rs @@ -719,10 +719,18 @@ pub trait StateProvider: Send + Sync { }; let balance_holds = match request.balance_handling() { BalanceHandling::Total => BTreeMap::new(), - BalanceHandling::Available => match tc.get_balance_holds(purse_addr) { - Ok(holds) => holds, - Err(tce) => return tce.into(), - }, + BalanceHandling::Available => { + match tc.get_balance_hold_config(BalanceHoldAddrTag::Gas) { + Ok(Some((block_time, _, interval))) => { + match tc.get_balance_holds(purse_addr, block_time, interval) { + Ok(holds) => holds, + Err(tce) => return tce.into(), + } + } + Ok(None) => BTreeMap::new(), + Err(tce) => return tce.into(), + } + } }; (total_balance, ProofsResult::NotRequested { balance_holds }) } @@ -2183,7 +2191,7 @@ pub fn put_stored_values<'a, R, S, E>( environment: &'a R, store: &S, prestate_hash: Digest, - stored_values: BTreeMap, + stored_values: Vec<(Key, StoredValue)>, ) -> Result where R: TransactionSource<'a, Handle = S::Handle>, diff --git a/storage/src/global_state/state/scratch.rs b/storage/src/global_state/state/scratch.rs index be83ccce14..dec12c369b 100644 --- a/storage/src/global_state/state/scratch.rs +++ b/storage/src/global_state/state/scratch.rs @@ -1,6 +1,6 @@ use lmdb::RwTransaction; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashMap, VecDeque}, mem, ops::Deref, sync::{Arc, RwLock}, @@ -9,7 +9,7 @@ use std::{ use tracing::{debug, error}; use casper_types::{ - bytesrepr::ToBytes, + bytesrepr::{self, ToBytes}, execution::{Effects, TransformInstruction, TransformKindV2, TransformV2}, global_state::TrieMerkleProof, Digest, Key, StoredValue, @@ -40,15 +40,106 @@ use crate::tracking_copy::TrackingCopy; type SharedCache = Arc>; struct Cache { - cached_values: BTreeMap, + cached_values: HashMap, pruned: BTreeSet, + cached_keys: CacheTrie, +} + +struct CacheTrieNode { + children: BTreeMap>, + value: Option, +} + +impl CacheTrieNode { + fn new() -> Self { + CacheTrieNode { + children: BTreeMap::new(), + value: None, + } + } + + fn remove(&mut self, bytes: &[u8], depth: usize) -> bool { + if depth == bytes.len() { 
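+            // End of the key bytes: if a value is stored here, clear it and tell the parent
+            // whether this now-empty node can be dropped from the trie.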
+ if self.value.is_some() { + self.value = None; + return self.children.is_empty(); + } + return false; + } + + if let Some(child_node) = self.children.get_mut(&bytes[depth]) { + if child_node.remove(bytes, depth + 1) { + self.children.remove(&bytes[depth]); + return self.value.is_none() && self.children.is_empty(); + } + } + false + } +} + +struct CacheTrie { + root: CacheTrieNode, +} + +impl CacheTrie { + fn new() -> Self { + CacheTrie { + root: CacheTrieNode::new(), + } + } + + fn insert(&mut self, key_bytes: &[u8], key: T) { + let mut current_node = &mut self.root; + for &byte in key_bytes { + current_node = current_node + .children + .entry(byte) + .or_insert(CacheTrieNode::new()); + } + current_node.value = Some(key); + } + + fn keys_with_prefix(&self, prefix: &[u8]) -> Vec { + let mut current_node = &self.root; + let mut result = Vec::new(); + + for &byte in prefix { + match current_node.children.get(&byte) { + Some(node) => current_node = node, + None => return result, + } + } + + self.collect_keys(current_node, &mut result); + result + } + + fn collect_keys(&self, start_node: &CacheTrieNode, result: &mut Vec) { + let mut stack = VecDeque::new(); + stack.push_back(start_node); + + while let Some(node) = stack.pop_back() { + if let Some(key) = node.value { + result.push(key); + } + + for child_node in node.children.values() { + stack.push_back(child_node); + } + } + } + + fn remove(&mut self, key_bytes: &[u8]) -> bool { + self.root.remove(key_bytes, 0) + } } impl Cache { fn new() -> Self { Cache { - cached_values: BTreeMap::new(), + cached_values: HashMap::new(), pruned: BTreeSet::new(), + cached_keys: CacheTrie::new(), } } @@ -57,18 +148,27 @@ impl Cache { self.cached_values.is_empty() && self.pruned.is_empty() } - fn insert_write(&mut self, key: Key, value: StoredValue) { + fn insert_write(&mut self, key: Key, value: StoredValue) -> Result<(), bytesrepr::Error> { self.pruned.remove(&key); - self.cached_values.insert(key, (true, value)); + if self.cached_values.insert(key, (true, value)).is_none() { + let key_bytes = key.to_bytes()?; + self.cached_keys.insert(&key_bytes, key); + }; + Ok(()) } - fn insert_read(&mut self, key: Key, value: StoredValue) { + fn insert_read(&mut self, key: Key, value: StoredValue) -> Result<(), bytesrepr::Error> { + let key_bytes = key.to_bytes()?; + self.cached_keys.insert(&key_bytes, key); self.cached_values.entry(key).or_insert((false, value)); + Ok(()) } - fn prune(&mut self, key: Key) { + fn prune(&mut self, key: Key) -> Result<(), bytesrepr::Error> { self.cached_values.remove(&key); + self.cached_keys.remove(&key.to_bytes()?); self.pruned.insert(key); + Ok(()) } fn get(&self, key: &Key) -> Option<&StoredValue> { @@ -80,13 +180,23 @@ impl Cache { /// Consumes self and returns only written values as values that were only read must be filtered /// out to prevent unnecessary writes. 
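+    /// The written values are collected by walking the cache trie, so their order follows the
+    /// trie traversal rather than the `Key` ordering that the previous `BTreeMap` return type implied.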
- fn into_dirty_writes(self) -> (BTreeMap, BTreeSet) { - let keys_to_prune = self.pruned; - let stored_values: BTreeMap = self - .cached_values + fn into_dirty_writes(self) -> (Vec<(Key, StoredValue)>, BTreeSet) { + let stored_values: Vec<(Key, StoredValue)> = self + .cached_keys + .keys_with_prefix(&[]) .into_iter() - .filter_map(|(key, (dirty, value))| if dirty { Some((key, value)) } else { None }) + .filter_map(|key| { + self.cached_values.get(&key).and_then(|(dirty, value)| { + if *dirty { + Some((key, value.clone())) + } else { + None + } + }) + }) .collect(); + let keys_to_prune = self.pruned; + debug!( "Cache::into_dirty_writes prune_count: {} store_count: {}", keys_to_prune.len(), @@ -148,7 +258,7 @@ impl ScratchGlobalState { } /// Consume self and return inner cache. - pub fn into_inner(self) -> (BTreeMap, BTreeSet) { + pub fn into_inner(self) -> (Vec<(Key, StoredValue)>, BTreeSet) { let cache = mem::replace(&mut *self.cache.write().unwrap(), Cache::new()); cache.into_dirty_writes() } @@ -175,7 +285,10 @@ impl StateReader for ScratchGlobalStateView { key, )? { ReadResult::Found(value) => { - self.cache.write().unwrap().insert_read(*key, value.clone()); + self.cache + .write() + .expect("poisoned scratch cache lock") + .insert_read(*key, value.clone())?; Some(value) } ReadResult::NotFound => None, @@ -213,13 +326,9 @@ impl StateReader for ScratchGlobalStateView { fn keys_with_prefix(&self, prefix: &[u8]) -> Result, Self::Error> { let mut ret = Vec::new(); - let cache = self.cache.read().unwrap(); - for cached_key in cache.cached_values.keys() { - let serialized_key = cached_key.to_bytes()?; - if serialized_key.starts_with(prefix) && !cache.pruned.contains(cached_key) { - ret.push(*cached_key) - } - } + let cache = self.cache.read().expect("poisoned scratch cache mutex"); + let cached_keys = cache.cached_keys.keys_with_prefix(prefix); + ret.extend(cached_keys); let txn = self.environment.create_read_txn()?; let keys_iter = keys_with_prefix::( @@ -250,6 +359,7 @@ impl CommitProvider for ScratchGlobalState { /// State hash returned is the one provided, as we do not write to lmdb with this kind of global /// state. Note that the state hash is NOT used, and simply passed back to the caller. fn commit(&self, state_hash: Digest, effects: Effects) -> Result { + let txn = self.environment.create_read_txn()?; for (key, kind) in effects.value().into_iter().map(TransformV2::destructure) { let cached_value = self.cache.read().unwrap().get(&key).cloned(); let instruction = match (cached_value, kind) { @@ -261,16 +371,14 @@ impl CommitProvider for ScratchGlobalState { (None, transform_kind) => { // It might be the case that for `Add*` operations we don't have the previous // value in cache yet. - let txn = self.environment.create_read_txn()?; - let instruction = match read::< + match read::< Key, StoredValue, lmdb::RoTransaction, LmdbTrieStore, GlobalStateError, - >( - &txn, self.trie_store.deref(), &state_hash, &key - )? { + >(&txn, self.trie_store.deref(), &state_hash, &key)? 
+ { ReadResult::Found(current_value) => { match transform_kind.apply(current_value.clone()) { Ok(instruction) => instruction, @@ -292,9 +400,7 @@ impl CommitProvider for ScratchGlobalState { error!(root_hash=?state_hash, "root not found"); return Err(CommitError::ReadRootNotFound(state_hash).into()); } - }; - txn.commit()?; - instruction + } } (Some(current_value), transform_kind) => { match transform_kind.apply(current_value) { @@ -309,13 +415,14 @@ impl CommitProvider for ScratchGlobalState { let mut cache = self.cache.write().unwrap(); match instruction { TransformInstruction::Store(value) => { - cache.insert_write(key, value); + cache.insert_write(key, value)?; } TransformInstruction::Prune(key) => { - cache.prune(key); + cache.prune(key)?; } } } + txn.commit()?; Ok(state_hash) } } @@ -607,10 +714,14 @@ pub(crate) mod tests { assert_eq!(all_keys.len(), stored_values.len()); for key in all_keys { - assert!(stored_values.get(&key).is_some()); assert_eq!( - stored_values.get(&key), - updated_checkout.read(&key).unwrap().as_ref() + stored_values + .iter() + .find(|(k, _)| k == &key) + .unwrap() + .1 + .clone(), + updated_checkout.read(&key).unwrap().unwrap() ); } @@ -702,4 +813,100 @@ pub(crate) mod tests { original_checkout.read(&test_pairs_updated[2].key).unwrap() ); } + + #[test] + fn cache_trie_basic_insert_get() { + let mut trie = CacheTrie::new(); + let key_hello = Key::Hash(*b"hello..........................."); + let key_world = Key::Hash(*b"world..........................."); + let key_hey = Key::Hash(*b"hey............................."); + + trie.insert(b"hello", key_hello); + trie.insert(b"world", key_world); + trie.insert(b"hey", key_hey); + + assert_eq!(trie.keys_with_prefix(b"he"), vec![key_hey, key_hello]); + assert_eq!(trie.keys_with_prefix(b"wo"), vec![key_world]); + } + + #[test] + fn cache_trie_overlapping_prefix() { + let mut trie = CacheTrie::new(); + let key_apple = Key::Hash(*b"apple..........................."); + let key_app = Key::Hash(*b"app............................."); + let key_apron = Key::Hash(*b"apron..........................."); + + trie.insert(b"apple", key_apple); + trie.insert(b"app", key_app); + trie.insert(b"apron", key_apron); + + assert_eq!( + trie.keys_with_prefix(b"ap"), + vec![key_apron, key_app, key_apple] + ); + assert_eq!(trie.keys_with_prefix(b"app"), vec![key_app, key_apple]); + } + + #[test] + fn cache_trie_leaf_removal() { + let mut trie = CacheTrie::new(); + let key_cat = Key::Hash(*b"cat............................."); + let key_category = Key::Hash(*b"category........................"); + + trie.insert(b"cat", key_cat); + trie.insert(b"category", key_category); + + trie.remove(b"category"); + assert_eq!(trie.keys_with_prefix(b"ca"), vec![key_cat]); + } + + #[test] + fn cache_trie_internal_node_removal() { + let mut trie = CacheTrie::new(); + let key_be = Key::Hash(*b"be.............................."); + let key_berry = Key::Hash(*b"berry..........................."); + + trie.insert(b"be", key_be); + trie.insert(b"berry", key_berry); + + trie.remove(b"be"); + assert_eq!(trie.keys_with_prefix(b"be"), vec![key_berry]); + } + + #[test] + fn cache_trie_non_existent_prefix() { + let mut trie = CacheTrie::new(); + + let key_apple = Key::Hash(*b"apple..........................."); + let key_mango = Key::Hash(*b"mango..........................."); + + trie.insert(b"apple", key_apple); + trie.insert(b"mango", key_mango); + + assert_eq!(trie.keys_with_prefix(b"b"), Vec::::new()); + } + + #[test] + fn cache_trie_empty_trie_search() { + let 
trie = CacheTrie::::new(); + + assert_eq!(trie.keys_with_prefix(b""), Vec::::new()); + } + + #[test] + fn cache_trie_empty_prefix_search_all_keys() { + let mut trie = CacheTrie::new(); + let key_hello = Key::Hash(*b"hello..........................."); + let key_world = Key::Hash(*b"world..........................."); + let key_hey = Key::Hash(*b"hey............................."); + + trie.insert(b"hello", key_hello); + trie.insert(b"world", key_world); + trie.insert(b"hey", key_hey); + + assert_eq!( + trie.keys_with_prefix(b""), + vec![key_world, key_hey, key_hello] + ); + } } diff --git a/storage/src/system/auction.rs b/storage/src/system/auction.rs index 7081896f44..927d34da0b 100644 --- a/storage/src/system/auction.rs +++ b/storage/src/system/auction.rs @@ -11,7 +11,7 @@ use tracing::{debug, error, warn}; use crate::system::auction::detail::{ process_with_vesting_schedule, read_delegator_bid, read_delegator_bids, read_validator_bid, - seigniorage_recipient, + seigniorage_recipients, }; use casper_types::{ account::AccountHash, @@ -399,9 +399,14 @@ pub trait Auction: let minimum_delegation_amount = U512::from(validator_bid.minimum_delegation_amount()); let maximum_delegation_amount = U512::from(validator_bid.maximum_delegation_amount()); - let mut delegators = read_delegator_bids(self, validator_public_key)?; - for delegator in delegators.iter_mut() { + let delegators = read_delegator_bids(self, validator_public_key)?; + for mut delegator in delegators { let staked_amount = delegator.staked_amount(); + if staked_amount.is_zero() { + // A delegator who has unbonded - nothing to do here, this will be removed when + // unbonding is processed. + continue; + } if staked_amount < minimum_delegation_amount || staked_amount > maximum_delegation_amount { @@ -419,20 +424,29 @@ pub trait Auction: amount, None, )?; - match delegator.decrease_stake(amount, era_end_timestamp_millis) { - Ok(_) => (), - // Work around the case when the locked amounts table has yet to be - // initialized (likely pre-90 day mark). - Err(Error::DelegatorFundsLocked) => continue, - Err(err) => return Err(err), - } + let updated_stake = + match delegator.decrease_stake(amount, era_end_timestamp_millis) { + Ok(updated_stake) => updated_stake, + // Work around the case when the locked amounts table has yet to be + // initialized (likely pre-90 day mark). + Err(Error::DelegatorFundsLocked) => continue, + Err(err) => return Err(err), + }; let delegator_bid_addr = BidAddr::new_from_public_keys( validator_public_key, Some(&delegator_public_key), ); - debug!("pruning delegator bid {}", delegator_bid_addr); - self.prune_bid(delegator_bid_addr) + debug!( + "forced undelegation for {} reducing {} by {} to {}", + delegator_bid_addr, staked_amount, amount, updated_stake + ); + + // Keep the bid for now - it will get pruned when the unbonds are processed. 
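+                    // (Writing the reduced stake back, instead of pruning the bid immediately as
+                    // before, keeps the delegator record consistent until unbond processing removes it.)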
+ self.write_bid( + delegator_bid_addr.into(), + BidKind::Delegator(Box::new(delegator)), + )?; } } } @@ -573,7 +587,6 @@ pub trait Auction: // Compute next auction winners let winners: ValidatorWeights = { let locked_validators = validator_bids_detail.validator_weights( - self, era_id, era_end_timestamp_millis, vesting_schedule_period_millis, @@ -585,7 +598,6 @@ pub trait Auction: let remaining_auction_slots = validator_slots.saturating_sub(locked_validators.len()); if remaining_auction_slots > 0 { let unlocked_validators = validator_bids_detail.validator_weights( - self, era_id, era_end_timestamp_millis, vesting_schedule_period_millis, @@ -613,10 +625,11 @@ pub trait Auction: } }; - let (validator_bids, validator_credits) = validator_bids_detail.destructure(); + let (validator_bids, validator_credits, delegator_bids) = + validator_bids_detail.destructure(); // call prune BEFORE incrementing the era - detail::prune_validator_credits(self, era_id, validator_credits); + detail::prune_validator_credits(self, era_id, &validator_credits); // Increment era era_id = era_id.checked_add(1).ok_or(Error::ArithmeticOverflow)?; @@ -628,16 +641,7 @@ pub trait Auction: // Update seigniorage recipients for current era { let mut snapshot = detail::get_seigniorage_recipients_snapshot(self)?; - let mut recipients = SeigniorageRecipients::new(); - - for era_validator in winners.keys() { - let seigniorage_recipient = match validator_bids.get(era_validator) { - Some(validator_bid) => seigniorage_recipient(self, validator_bid)?, - None => return Err(Error::BidNotFound.into()), - }; - recipients.insert(era_validator.clone(), seigniorage_recipient); - } - + let recipients = seigniorage_recipients(&winners, &validator_bids, &delegator_bids)?; let previous_recipients = snapshot.insert(delayed_era, recipients); assert!(previous_recipients.is_none()); diff --git a/storage/src/system/auction/auction_native.rs b/storage/src/system/auction/auction_native.rs index 91cf15d659..5b16fd2002 100644 --- a/storage/src/system/auction/auction_native.rs +++ b/storage/src/system/auction/auction_native.rs @@ -8,7 +8,7 @@ use crate::{ mint::Mint, runtime_native::RuntimeNative, }, - tracking_copy::TrackingCopyError, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError}, }; use casper_types::{ account::AccountHash, @@ -214,6 +214,20 @@ where fn unbond(&mut self, unbonding_purse: &UnbondingPurse) -> Result<(), Error> { let account_hash = AccountHash::from_public_key(unbonding_purse.unbonder_public_key(), crypto::blake2b); + + // Do a migration if the account hasn't been migrated yet. This is just a read if it has + // been migrated already. 
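The comment above describes an idempotent migration: calling it on an already-migrated account reduces to a read. A small, hypothetical sketch of that pattern follows, with Store and Record standing in for the tracking copy and the legacy/migrated account layouts (the real call is migrate_account(account_hash, protocol_version) on the tracking copy):

use std::collections::HashMap;

// Hypothetical stand-ins: `Record` plays the role of a legacy Account vs. a
// migrated AddressableEntity; `Store` plays the role of the tracking copy.
#[derive(Clone)]
enum Record {
    Legacy { balance: u64 },
    Migrated { balance: u64 },
}

struct Store {
    records: HashMap<u64, Record>,
}

impl Store {
    // Upgrades a legacy record in place; a no-op (pure read) if already migrated.
    fn migrate_account(&mut self, id: u64) -> Result<(), String> {
        match self.records.get(&id).cloned() {
            Some(Record::Legacy { balance }) => {
                self.records.insert(id, Record::Migrated { balance });
                Ok(())
            }
            Some(Record::Migrated { .. }) => Ok(()),
            None => Err(format!("unknown account {id}")),
        }
    }

    // Unbonding always migrates first, so the body can assume the new layout.
    fn unbond(&mut self, id: u64) -> Result<u64, String> {
        self.migrate_account(id)?;
        match self.records.get(&id) {
            Some(Record::Migrated { balance }) => Ok(*balance),
            _ => Err("account not in migrated form".to_string()),
        }
    }
}

fn main() {
    let mut store = Store { records: HashMap::new() };
    store.records.insert(1, Record::Legacy { balance: 500 });
    assert_eq!(store.unbond(1), Ok(500)); // first call migrates, then reads
    assert_eq!(store.unbond(1), Ok(500)); // second call: migration is just a read
}

Running the migration unconditionally keeps unbond simple: everything after the call can assume the post-migration layout without branching on how old the account is.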
+ self.tracking_copy() + .borrow_mut() + .migrate_account(account_hash, self.protocol_version()) + .map_err(|error| { + error!( + "MintProvider::unbond: couldn't migrate account: {:?}", + error + ); + Error::Storage + })?; + let maybe_value = self .tracking_copy() .borrow_mut() diff --git a/storage/src/system/auction/detail.rs b/storage/src/system/auction/detail.rs index 25d45a129a..597a55fc15 100644 --- a/storage/src/system/auction/detail.rs +++ b/storage/src/system/auction/detail.rs @@ -10,11 +10,11 @@ use casper_types::{ account::AccountHash, bytesrepr::{FromBytes, ToBytes}, system::auction::{ - BidAddr, BidKind, Delegator, Error, SeigniorageAllocation, SeigniorageRecipient, - SeigniorageRecipientsSnapshot, UnbondingPurse, UnbondingPurses, ValidatorBid, - ValidatorBids, ValidatorCredit, ValidatorCredits, AUCTION_DELAY_KEY, - ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, - UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + BidAddr, BidKind, Delegator, DelegatorBids, Error, SeigniorageAllocation, + SeigniorageRecipient, SeigniorageRecipients, SeigniorageRecipientsSnapshot, UnbondingPurse, + UnbondingPurses, ValidatorBid, ValidatorBids, ValidatorCredit, ValidatorCredits, + AUCTION_DELAY_KEY, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, }, ApiError, CLTyped, EraId, Key, KeyTag, PublicKey, URef, U512, }; @@ -59,6 +59,7 @@ where pub struct ValidatorBidsDetail { validator_bids: ValidatorBids, validator_credits: ValidatorCredits, + delegator_bids: DelegatorBids, } impl ValidatorBidsDetail { @@ -66,6 +67,7 @@ impl ValidatorBidsDetail { ValidatorBidsDetail { validator_bids: BTreeMap::new(), validator_credits: BTreeMap::new(), + delegator_bids: BTreeMap::new(), } } @@ -74,7 +76,9 @@ impl ValidatorBidsDetail { &mut self, validator: PublicKey, validator_bid: Box<ValidatorBid>, + delegators: Vec<Box<Delegator>>, ) -> Option<Box<ValidatorBid>> { + self.delegator_bids.insert(validator.clone(), delegators); self.validator_bids.insert(validator, validator_bid) } @@ -110,19 +114,15 @@ impl ValidatorBidsDetail { /// Get validator weights. #[allow(clippy::too_many_arguments)] - pub fn validator_weights
<P>
( + pub fn validator_weights( &mut self, - provider: &mut P, era_ending: EraId, era_end_timestamp_millis: u64, vesting_schedule_period_millis: u64, locked: bool, include_credits: bool, cap: Ratio<U512>, - ) -> Result<ValidatorWeights, Error> - where - P: RuntimeProvider + ?Sized + StorageProvider, - { + ) -> Result<ValidatorWeights, Error> { let mut ret = BTreeMap::new(); for (validator_public_key, bid) in self.validator_bids.iter().filter(|(_, v)| { @@ -133,7 +133,13 @@ impl ValidatorBidsDetail { ) && !v.inactive() }) { - let staked_amount = total_staked_amount(provider, bid)?; + let mut staked_amount = bid.staked_amount(); + if let Some(delegators) = self.delegator_bids.get(validator_public_key) { + staked_amount = staked_amount + .checked_add(delegators.iter().map(|d| d.staked_amount()).sum()) + .ok_or(Error::InvalidAmount)?; + } + let credit_amount = self.credit_amount( validator_public_key, era_ending, @@ -182,8 +188,12 @@ impl ValidatorBidsDetail { } /// Consume self into in underlying collections. - pub fn destructure(self) -> (ValidatorBids, ValidatorCredits) { - (self.validator_bids, self.validator_credits) + pub fn destructure(self) -> (ValidatorBids, ValidatorCredits, DelegatorBids) { + ( + self.validator_bids, + self.validator_credits, + self.delegator_bids, + ) } } @@ -193,13 +203,13 @@ impl ValidatorBidsDetail { pub fn prune_validator_credits
<P>
( provider: &mut P, era_ending: EraId, - validator_credits: ValidatorCredits, + validator_credits: &ValidatorCredits, ) where P: StorageProvider + RuntimeProvider + ?Sized, { for (validator_public_key, inner) in validator_credits { if inner.contains_key(&era_ending) { - provider.prune_bid(BidAddr::new_credit(&validator_public_key, era_ending)) + provider.prune_bid(BidAddr::new_credit(validator_public_key, era_ending)) } } } @@ -215,7 +225,9 @@ where for key in bids_keys { match provider.read_bid(&key)? { Some(BidKind::Validator(validator_bid)) => { - ret.insert_bid(validator_bid.validator_public_key().clone(), validator_bid); + let validator_public_key = validator_bid.validator_public_key(); + let delegator_bids = delegators(provider, validator_public_key)?; + ret.insert_bid(validator_public_key.clone(), validator_bid, delegator_bids); } Some(BidKind::Credit(credit)) => { ret.insert_credit(credit.validator_public_key().clone(), era_id, credit); @@ -896,28 +908,44 @@ where } } -pub fn seigniorage_recipient
<P>
( - provider: &mut P, - validator_bid: &ValidatorBid, -) -> Result<SeigniorageRecipient, Error> -where - P: RuntimeProvider + ?Sized + StorageProvider, -{ - let mut delegator_stake: BTreeMap<PublicKey, U512> = BTreeMap::new(); - for delegator_bid in read_delegator_bids(provider, validator_bid.validator_public_key())? { - if delegator_bid.staked_amount().is_zero() { - continue; +pub fn seigniorage_recipients( + validator_weights: &ValidatorWeights, + validator_bids: &ValidatorBids, + delegator_bids: &DelegatorBids, +) -> Result<SeigniorageRecipients, Error> { + let mut recipients = SeigniorageRecipients::new(); + for (validator_public_key, validator_total_weight) in validator_weights { + // check if validator bid exists before processing. + let validator_bid = validator_bids + .get(validator_public_key) + .ok_or(Error::ValidatorNotFound)?; + // calculate delegator portion(s), if any + let mut delegators_weight = U512::zero(); + let mut delegators_stake: BTreeMap<PublicKey, U512> = BTreeMap::new(); + if let Some(delegators) = delegator_bids.get(validator_public_key) { + for delegator_bid in delegators { + if delegator_bid.staked_amount().is_zero() { + continue; + } + let delegator_staked_amount = delegator_bid.staked_amount(); + delegators_weight = delegators_weight.saturating_add(delegator_staked_amount); + delegators_stake.insert( + delegator_bid.delegator_public_key().clone(), + delegator_staked_amount, + ); + } } - delegator_stake.insert( - delegator_bid.delegator_public_key().clone(), - delegator_bid.staked_amount(), + + // determine validator's personal stake (total weight - sum of delegators weight) + let validator_stake = validator_total_weight.saturating_sub(delegators_weight); + let seigniorage_recipient = SeigniorageRecipient::new( + validator_stake, + *validator_bid.delegation_rate(), + delegators_stake, ); + recipients.insert(validator_public_key.clone(), seigniorage_recipient); } - Ok(SeigniorageRecipient::new( - validator_bid.staked_amount(), - *validator_bid.delegation_rate(), - delegator_stake, - )) + Ok(recipients) } /// Returns the era validators from a snapshot. @@ -990,26 +1018,25 @@ where } } -/// Returns the total staked amount of validator + all delegators -pub fn total_staked_amount
<P>
(provider: &mut P, validator_bid: &ValidatorBid) -> Result<U512, Error> +pub fn delegators
<P>
( + provider: &mut P, + validator_public_key: &PublicKey, +) -> Result<Vec<Box<Delegator>>, Error> where P: RuntimeProvider + ?Sized + StorageProvider, { - let bid_addr = BidAddr::from(validator_bid.validator_public_key().clone()); + let mut ret = vec![]; + let bid_addr = BidAddr::from(validator_public_key.clone()); let delegator_bid_keys = provider.get_keys_by_prefix( &bid_addr .delegators_prefix() .map_err(|_| Error::Serialization)?, )?; - let mut sum = U512::zero(); - for delegator_bid_key in delegator_bid_keys { let delegator = read_delegator_bid(provider, &delegator_bid_key)?; - let staked_amount = delegator.staked_amount(); - sum += staked_amount; + ret.push(delegator); } - sum.checked_add(validator_bid.staked_amount()) - .ok_or(Error::InvalidAmount) + Ok(ret) } diff --git a/storage/src/system/genesis.rs b/storage/src/system/genesis.rs index 2e16e67cf0..a181b5bbc9 100644 --- a/storage/src/system/genesis.rs +++ b/storage/src/system/genesis.rs @@ -731,11 +731,19 @@ where } else { ByteCodeHash::new(self.address_generator.borrow_mut().new_hash_address()) }; - let entity_hash = if entity_kind.is_system_account() { - let entity_hash_addr = PublicKey::System.to_account_hash().value(); - AddressableEntityHash::new(entity_hash_addr) - } else { - AddressableEntityHash::new(self.address_generator.borrow_mut().new_hash_address()) + + let entity_hash = match entity_kind { + EntityKind::System(_) | EntityKind::SmartContract(_) => { + AddressableEntityHash::new(self.address_generator.borrow_mut().new_hash_address()) + } + EntityKind::Account(account_hash) => { + if entity_kind.is_system_account() { + let entity_hash_addr = PublicKey::System.to_account_hash().value(); + AddressableEntityHash::new(entity_hash_addr) + } else { + AddressableEntityHash::new(account_hash.value()) + } + } }; let package_hash = PackageHash::new(self.address_generator.borrow_mut().new_hash_address()); diff --git a/storage/src/system/mint.rs b/storage/src/system/mint.rs index a9c4387a72..bbcd0b48b4 100644 --- a/storage/src/system/mint.rs +++ b/storage/src/system/mint.rs @@ -285,7 +285,7 @@ pub trait Mint: RuntimeProvider + StorageProvider + SystemProvider { // treat as noop return Ok(()); } - if self.available_balance(existing_purse)?.is_none() { + if !self.purse_exists(existing_purse)? { return Err(Error::PurseNotFound); } self.add_balance(existing_purse, amount)?; @@ -304,4 +304,7 @@ pub trait Mint: RuntimeProvider + StorageProvider + SystemProvider { self.add(total_supply_uref, amount)?; Ok(()) } + + /// Check if a purse exists. + fn purse_exists(&mut self, uref: URef) -> Result<bool, Error>; } diff --git a/storage/src/system/mint/mint_native.rs b/storage/src/system/mint/mint_native.rs index 67aec1dcbf..7452607bbe 100644 --- a/storage/src/system/mint/mint_native.rs +++ b/storage/src/system/mint/mint_native.rs @@ -254,4 +254,21 @@ where } } -impl<S> Mint for RuntimeNative<S> where S: StateReader {} +impl<S> Mint for RuntimeNative<S> +where + S: StateReader, +{ + fn purse_exists(&mut self, uref: URef) -> Result<bool, Error> { + let key = Key::Balance(uref.addr()); + match self + .tracking_copy() + .borrow_mut() + .read(&key) + .map_err(|_| Error::Storage)?
+ { + Some(StoredValue::CLValue(value)) => Ok(*value.cl_type() == U512::cl_type()), + Some(_non_cl_value) => Err(Error::CLValue), + None => Ok(false), + } + } +} diff --git a/storage/src/system/protocol_upgrade.rs b/storage/src/system/protocol_upgrade.rs index daaa52a432..2ca6f2de6c 100644 --- a/storage/src/system/protocol_upgrade.rs +++ b/storage/src/system/protocol_upgrade.rs @@ -506,7 +506,7 @@ where let mut address_generator = AddressGenerator::new(pre_state_hash.as_ref(), Phase::System); let byte_code_hash = ByteCodeHash::default(); - let entity_hash = AddressableEntityHash::new(address_generator.new_hash_address()); + let entity_hash = AddressableEntityHash::new(PublicKey::System.to_account_hash().value()); let package_hash = PackageHash::new(address_generator.new_hash_address()); let byte_code = ByteCode::new(ByteCodeKind::Empty, vec![]); diff --git a/storage/src/tracking_copy/ext.rs b/storage/src/tracking_copy/ext.rs index 87470e9d04..a5ae4067b1 100644 --- a/storage/src/tracking_copy/ext.rs +++ b/storage/src/tracking_copy/ext.rs @@ -86,6 +86,8 @@ pub trait TrackingCopyExt { fn get_balance_holds( &mut self, purse_addr: URefAddr, + block_time: BlockTime, + interval: u64, ) -> Result, Self::Error>; /// Gets the balance holds for a given balance, with Merkle proofs. @@ -327,7 +329,7 @@ where Some((block_time, handling, interval)) => (block_time, handling, interval), }; - let balance_holds = self.get_balance_holds(purse_addr)?; + let balance_holds = self.get_balance_holds(purse_addr, block_time, interval)?; let gas_handling = (handling, interval).into(); let processing_handling = ProcessingHoldBalanceHandling::new(); match balance_holds.available_balance( @@ -427,6 +429,8 @@ where fn get_balance_holds( &mut self, purse_addr: URefAddr, + block_time: BlockTime, + interval: u64, ) -> Result, Self::Error> { // NOTE: currently there are two kinds of holds, gas and processing. // Processing holds only effect one block to prevent double spend and are always @@ -439,14 +443,7 @@ where // for each hold kind and process each kind discretely in order and collate the // non-expired hold total at the end. let mut ret: BTreeMap = BTreeMap::new(); - let (block_time, interval) = match self.get_balance_hold_config(BalanceHoldAddrTag::Gas)? 
{ - Some((block_time, _, interval)) => (block_time.value(), interval), - None => { - // if there is no holds config at this root hash, there can't be any holds - return Ok(ret); - } - }; - let holds_epoch = { HoldsEpoch::from_millis(block_time, interval) }; + let holds_epoch = { HoldsEpoch::from_millis(block_time.value(), interval) }; let holds = self.get_balance_hold_addresses(purse_addr)?; for balance_hold_addr in holds { let block_time = balance_hold_addr.block_time(); diff --git a/storage/src/tracking_copy/ext_entity.rs b/storage/src/tracking_copy/ext_entity.rs index 761d9e26bd..05dc06a1ae 100644 --- a/storage/src/tracking_copy/ext_entity.rs +++ b/storage/src/tracking_copy/ext_entity.rs @@ -449,7 +449,7 @@ where let mut generator = AddressGenerator::new(main_purse.addr().as_ref(), Phase::System); let byte_code_hash = ByteCodeHash::default(); - let entity_hash = AddressableEntityHash::new(generator.new_hash_address()); + let entity_hash = AddressableEntityHash::new(account_hash.value()); let package_hash = PackageHash::new(generator.new_hash_address()); let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1)); @@ -592,7 +592,7 @@ where let access_uref = legacy_package.access_key(); let mut generator = AddressGenerator::new(access_uref.addr().as_ref(), Phase::System); - let mut package: Package = legacy_package.into(); + let package: Package = legacy_package.into(); for (_, contract_hash) in legacy_versions.into_iter() { let legacy_contract = match self.read(&Key::Hash(contract_hash.value()))? { @@ -667,8 +667,6 @@ where self.prune(Key::Hash(contract_hash.value())); // Prune the legacy Wasm record. self.prune(Key::Hash(contract_wasm_hash.value())); - - package.insert_entity_version(protocol_version.value().major, entity_hash); } let access_key_value = CLValue::from_t(access_uref).map_err(Self::Error::CLValue)?; diff --git a/types/src/addressable_entity.rs b/types/src/addressable_entity.rs index 81d8c8cc65..b4255ac91e 100644 --- a/types/src/addressable_entity.rs +++ b/types/src/addressable_entity.rs @@ -851,6 +851,16 @@ impl EntityAddr { || self.value() == PublicKey::System.to_account_hash().value() } + /// Is this a contract entity address? + pub fn is_contract(&self) -> bool { + self.tag() == EntityKindTag::SmartContract + } + + /// Is this an account entity address? + pub fn is_account(&self) -> bool { + self.tag() == EntityKindTag::Account + } + /// Returns the 32 bytes of the [`EntityAddr`]. pub fn value(&self) -> HashAddr { match self { diff --git a/types/src/package.rs b/types/src/package.rs index 480958e361..0055bb17eb 100644 --- a/types/src/package.rs +++ b/types/src/package.rs @@ -216,6 +216,11 @@ impl EntityVersions { None } } + + /// The number of versions present in the package. + pub fn version_count(&self) -> usize { + self.0.len() + } } impl ToBytes for EntityVersions { diff --git a/types/src/system/auction.rs b/types/src/system/auction.rs index a299967d0d..54b55afa31 100644 --- a/types/src/system/auction.rs +++ b/types/src/system/auction.rs @@ -49,6 +49,9 @@ pub type DelegationRate = u8; /// Validators mapped to their bids. pub type ValidatorBids = BTreeMap>; +/// Delegator bids mapped to their validator. +pub type DelegatorBids = BTreeMap>>; + /// Validators mapped to their credits by era. 
pub type ValidatorCredits = BTreeMap>>; diff --git a/types/src/transaction/deploy.rs b/types/src/transaction/deploy.rs index 13514f6f4b..5a2f30c541 100644 --- a/types/src/transaction/deploy.rs +++ b/types/src/transaction/deploy.rs @@ -46,8 +46,8 @@ use crate::runtime_args; use crate::{ bytesrepr::Bytes, system::auction::{ - ARG_AMOUNT as ARG_AUCTION_AMOUNT, ARG_DELEGATOR, ARG_NEW_VALIDATOR, - ARG_PUBLIC_KEY as ARG_AUCTION_PUBLIC_KEY, ARG_VALIDATOR, METHOD_DELEGATE, + ARG_AMOUNT as ARG_AUCTION_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_NEW_VALIDATOR, + ARG_PUBLIC_KEY as ARG_AUCTION_PUBLIC_KEY, ARG_VALIDATOR, METHOD_ADD_BID, METHOD_DELEGATE, METHOD_REDELEGATE, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID, }, testing::TestRng, @@ -1008,6 +1008,44 @@ impl Deploy { ) } + /// Creates an add bid deploy, for testing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn add_bid( + chain_name: String, + auction_contract_hash: AddressableEntityHash, + public_key: PublicKey, + amount: U512, + delegation_rate: u8, + timestamp: Timestamp, + ttl: TimeDiff, + ) -> Self { + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) }, + }; + let args = runtime_args! { + ARG_AUCTION_AMOUNT => amount, + ARG_AUCTION_PUBLIC_KEY => public_key.clone(), + ARG_DELEGATION_RATE => delegation_rate, + }; + let session = ExecutableDeployItem::StoredContractByHash { + hash: auction_contract_hash, + entry_point: METHOD_ADD_BID.to_string(), + args, + }; + + Deploy::build( + timestamp, + ttl, + 1, + vec![], + chain_name, + payment, + session, + InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(public_key)), + ) + } + /// Creates a withdraw bid deploy, for testing. 
#[cfg(any(all(feature = "std", feature = "testing"), test))] pub fn withdraw_bid( diff --git a/utils/global-state-update-gen/src/decode.rs b/utils/global-state-update-gen/src/decode.rs new file mode 100644 index 0000000000..d841f280e3 --- /dev/null +++ b/utils/global-state-update-gen/src/decode.rs @@ -0,0 +1,50 @@ +use std::{collections::BTreeMap, fmt, fs::File, io::Read}; + +use clap::ArgMatches; + +use casper_types::{ + bytesrepr::FromBytes, system::auction::SeigniorageRecipientsSnapshot, CLType, + GlobalStateUpdate, GlobalStateUpdateConfig, Key, StoredValue, +}; + +struct Entries(BTreeMap); + +impl fmt::Debug for Entries { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut map = f.debug_map(); + for (k, v) in &self.0 { + let debug_v: Box = match v { + StoredValue::CLValue(clv) => match clv.cl_type() { + CLType::Map { key, value: _ } if **key == CLType::U64 => { + // this should be the seigniorage recipient snapshot + let snapshot: SeigniorageRecipientsSnapshot = clv.clone().into_t().unwrap(); + Box::new(snapshot) + } + _ => Box::new(clv), + }, + _ => Box::new(v), + }; + map.key(k).value(&debug_v); + } + map.finish() + } +} + +pub(crate) fn decode_file(matches: &ArgMatches<'_>) { + let file_name = matches.value_of("file").unwrap(); + let mut file = File::open(file_name).unwrap(); + + let mut contents = String::new(); + file.read_to_string(&mut contents).unwrap(); + + let config: GlobalStateUpdateConfig = toml::from_str(&contents).unwrap(); + let update_data: GlobalStateUpdate = config.try_into().unwrap(); + + println!("validators = {:#?}", &update_data.validators); + let entries: BTreeMap<_, _> = update_data + .entries + .iter() + .map(|(key, bytes)| (*key, StoredValue::from_bytes(bytes).unwrap().0)) + .collect(); + println!("entries = {:#?}", Entries(entries)); +} diff --git a/utils/global-state-update-gen/src/generic.rs b/utils/global-state-update-gen/src/generic.rs index 62118ebfb0..56d344c90e 100644 --- a/utils/global-state-update-gen/src/generic.rs +++ b/utils/global-state-update-gen/src/generic.rs @@ -308,9 +308,15 @@ pub fn add_and_remove_bids( public_key.clone(), *bid.bonding_purse(), ))), - BidKind::Validator(validator_bid) => BidKind::Validator(Box::new( - ValidatorBid::empty(public_key.clone(), *validator_bid.bonding_purse()), - )), + BidKind::Validator(validator_bid) => { + let mut new_bid = + ValidatorBid::empty(public_key.clone(), *validator_bid.bonding_purse()); + new_bid.set_delegation_amount_boundaries( + validator_bid.minimum_delegation_amount(), + validator_bid.maximum_delegation_amount(), + ); + BidKind::Validator(Box::new(new_bid)) + } BidKind::Delegator(delegator_bid) => { BidKind::Delegator(Box::new(Delegator::empty( public_key.clone(), @@ -425,6 +431,8 @@ fn create_or_update_bid( *bid.delegation_rate(), delegator_stake, ), + 0, + u64::MAX, ) } BidKind::Validator(validator_bid) => { @@ -444,13 +452,17 @@ fn create_or_update_bid( *validator_bid.delegation_rate(), delegator_stake, ), + validator_bid.minimum_delegation_amount(), + validator_bid.maximum_delegation_amount(), ) } _ => unreachable!(), }); // existing bid - if let Some((bonding_purse, existing_recipient)) = maybe_existing_recipient { + if let Some((bonding_purse, existing_recipient, min_delegation_amount, max_delegation_amount)) = + maybe_existing_recipient + { if existing_recipient == *updated_recipient { return; // noop } @@ -524,8 +536,8 @@ fn create_or_update_bid( *bonding_purse, *updated_recipient.stake(), *updated_recipient.delegation_rate(), - 0, - u64::MAX, + min_delegation_amount, 
+ max_delegation_amount, ); state.set_bid( diff --git a/utils/global-state-update-gen/src/generic/state_tracker.rs b/utils/global-state-update-gen/src/generic/state_tracker.rs index 98be1214ce..4b6a0c2110 100644 --- a/utils/global-state-update-gen/src/generic/state_tracker.rs +++ b/utils/global-state-update-gen/src/generic/state_tracker.rs @@ -163,7 +163,7 @@ impl StateTracker { let mut rng = rand::thread_rng(); - let entity_hash = AddressableEntityHash::new(rng.gen()); + let entity_hash = AddressableEntityHash::new(account_hash.value()); let package_hash = PackageHash::new(rng.gen()); let contract_wasm_hash = ByteCodeHash::new([0u8; 32]); diff --git a/utils/global-state-update-gen/src/main.rs b/utils/global-state-update-gen/src/main.rs index 85e150b72f..247860d25d 100644 --- a/utils/global-state-update-gen/src/main.rs +++ b/utils/global-state-update-gen/src/main.rs @@ -1,5 +1,6 @@ mod admins; mod balances; +mod decode; mod generic; mod system_entity_registry; mod utils; @@ -9,7 +10,7 @@ use admins::generate_admins; use clap::{crate_version, App, Arg, SubCommand}; use crate::{ - balances::generate_balances_update, generic::generate_generic_update, + balances::generate_balances_update, decode::decode_file, generic::generate_generic_update, system_entity_registry::generate_system_entity_registry, validators::generate_validators_update, }; @@ -184,6 +185,17 @@ fn main() { .number_of_values(1), ), ) + .subcommand( + SubCommand::with_name("decode") + .about("Decodes the global_state.toml file into a readable form") + .arg( + Arg::with_name("file") + .value_name("FILE") + .index(1) + .required(true) + .help("The file to be decoded"), + ), + ) .get_matches(); match matches.subcommand() { @@ -194,6 +206,7 @@ fn main() { } ("generic", Some(sub_matches)) => generate_generic_update(sub_matches), ("generate-admins", Some(sub_matches)) => generate_admins(sub_matches), + ("decode", Some(sub_matches)) => decode_file(sub_matches), (subcommand, _) => { println!("Unknown subcommand: \"{}\"", subcommand); }
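For reference, the reworked seigniorage_recipients helper in storage/src/system/auction/detail.rs recovers each validator's personal stake by subtracting the summed delegator stakes from that validator's total auction weight, ignoring zero-stake (unbonding) delegators. A self-contained sketch of that split, using u128 and String as stand-ins for U512 and PublicKey:

use std::collections::BTreeMap;

// Stand-ins: u128 for U512, String for PublicKey.
type Weight = u128;

// Splits a validator's total auction weight into (own stake, delegated stake).
fn split_stake(total_weight: Weight, delegator_stakes: &BTreeMap<String, Weight>) -> (Weight, Weight) {
    // Zero-stake delegators are pending removal and contribute nothing.
    let delegators_weight: Weight = delegator_stakes
        .values()
        .copied()
        .filter(|stake| *stake > 0)
        .sum();
    // Saturating subtraction: never underflow even with inconsistent records.
    let validator_stake = total_weight.saturating_sub(delegators_weight);
    (validator_stake, delegators_weight)
}

fn main() {
    let mut delegators = BTreeMap::new();
    delegators.insert("alice".to_string(), 300);
    delegators.insert("bob".to_string(), 0); // unbonded delegator, ignored
    let (validator_stake, delegators_weight) = split_stake(1_000, &delegators);
    assert_eq!(delegators_weight, 300);
    assert_eq!(validator_stake, 700);
}

saturating_sub mirrors the defensive arithmetic in the patch: if delegator records ever exceeded the recorded total weight, the validator's own portion would floor at zero instead of underflowing.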
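Similarly, the purse_exists check added to the Mint trait decides existence from what a read of the balance key returns: a CLValue balance record means the purse exists, no record means it does not, and any other stored value is treated as an error. A hedged sketch with stand-in types rather than the casper_types API:

// Stand-ins for StoredValue / the mint Error; not the casper_types API.
#[derive(Debug, PartialEq)]
enum StoredEntry {
    Balance(u128), // plays the role of StoredValue::CLValue holding a U512 balance
    Other,         // any other record found under the same balance key
}

#[derive(Debug, PartialEq)]
enum MintError {
    UnexpectedStoredValue,
}

// Existence is decided purely by what the read of the balance key returned.
fn purse_exists(read_result: Option<StoredEntry>) -> Result<bool, MintError> {
    match read_result {
        Some(StoredEntry::Balance(_)) => Ok(true),
        Some(StoredEntry::Other) => Err(MintError::UnexpectedStoredValue),
        None => Ok(false),
    }
}

fn main() {
    assert_eq!(purse_exists(Some(StoredEntry::Balance(42))), Ok(true));
    assert_eq!(purse_exists(None), Ok(false));
    assert!(purse_exists(Some(StoredEntry::Other)).is_err());
}

The check replaces the earlier available_balance(...).is_none() probe with a direct test for the presence of a balance record, which is presumably cheaper than computing an available balance.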